aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorIngo Molnar <mingo@kernel.org>2019-09-03 03:23:41 -0400
committerIngo Molnar <mingo@kernel.org>2019-09-03 03:23:41 -0400
commitae1ad26388228048db6a5f1056bd569ed2bbc4ec (patch)
tree223f50677aa00eb6f2a6529099a1005c7e43c071
parentc84b82dd3e593db217f23c60f7edae02c76a3c4c (diff)
parent089cf7f6ecb266b6a4164919a2e69bd2f938374a (diff)
Merge tag 'v5.3-rc7' into x86/mm, to pick up fixes
Signed-off-by: Ingo Molnar <mingo@kernel.org>
-rw-r--r--.gitignore3
-rw-r--r--.mailmap8
-rw-r--r--Documentation/PCI/index.rst2
-rw-r--r--Documentation/PCI/pci-error-recovery.rst5
-rw-r--r--Documentation/PCI/pciebus-howto.rst (renamed from Documentation/PCI/picebus-howto.rst)0
-rw-r--r--Documentation/RCU/rculist_nulls.txt2
-rw-r--r--Documentation/admin-guide/conf.py10
-rw-r--r--Documentation/admin-guide/hw-vuln/spectre.rst88
-rw-r--r--Documentation/admin-guide/kernel-parameters.txt17
-rw-r--r--Documentation/admin-guide/mm/transhuge.rst2
-rw-r--r--Documentation/admin-guide/sysctl/net.rst29
-rw-r--r--Documentation/conf.py30
-rw-r--r--Documentation/core-api/conf.py10
-rw-r--r--Documentation/crypto/conf.py10
-rw-r--r--Documentation/dev-tools/conf.py10
-rw-r--r--Documentation/devicetree/bindings/Makefile4
-rw-r--r--Documentation/devicetree/bindings/arm/idle-states.txt2
-rw-r--r--Documentation/devicetree/bindings/arm/renesas.yaml2
-rw-r--r--Documentation/devicetree/bindings/arm/socionext/milbeaut.yaml2
-rw-r--r--Documentation/devicetree/bindings/arm/ti/ti,davinci.yaml2
-rw-r--r--Documentation/devicetree/bindings/clock/allwinner,sun4i-a10-ccu.yaml2
-rw-r--r--Documentation/devicetree/bindings/firmware/intel,ixp4xx-network-processing-engine.yaml2
-rw-r--r--Documentation/devicetree/bindings/iio/accel/adi,adxl345.yaml2
-rw-r--r--Documentation/devicetree/bindings/iio/accel/adi,adxl372.yaml2
-rw-r--r--Documentation/devicetree/bindings/interrupt-controller/amazon,al-fic.txt16
-rw-r--r--Documentation/devicetree/bindings/interrupt-controller/intel,ixp4xx-interrupt.yaml2
-rw-r--r--Documentation/devicetree/bindings/interrupt-controller/snps,archs-idu-intc.txt30
-rw-r--r--Documentation/devicetree/bindings/misc/intel,ixp4xx-ahb-queue-manager.yaml (renamed from Documentation/devicetree/bindings/misc/intel,ixp4xx-queue-manager.yaml)2
-rw-r--r--Documentation/devicetree/bindings/net/allwinner,sun8i-a83t-emac.yaml2
-rw-r--r--Documentation/devicetree/bindings/net/dsa/ksz.txt1
-rw-r--r--Documentation/devicetree/bindings/net/fsl-fec.txt30
-rw-r--r--Documentation/devicetree/bindings/net/macb.txt4
-rw-r--r--Documentation/devicetree/bindings/nvmem/allwinner,sun4i-a10-sid.yaml4
-rw-r--r--Documentation/devicetree/bindings/nvmem/nvmem-consumer.yaml45
-rw-r--r--Documentation/devicetree/bindings/nvmem/nvmem.txt81
-rw-r--r--Documentation/devicetree/bindings/nvmem/nvmem.yaml93
-rw-r--r--Documentation/devicetree/bindings/phy/allwinner,sun6i-a31-mipi-dphy.yaml2
-rw-r--r--Documentation/devicetree/bindings/pinctrl/st,stm32-pinctrl.yaml3
-rw-r--r--Documentation/devicetree/bindings/riscv/cpus.txt162
-rw-r--r--Documentation/devicetree/bindings/riscv/cpus.yaml16
-rw-r--r--Documentation/devicetree/bindings/riscv/sifive.yaml2
-rw-r--r--Documentation/devicetree/bindings/spi/spi-controller.yaml1
-rw-r--r--Documentation/devicetree/bindings/timer/intel,ixp4xx-timer.yaml2
-rw-r--r--Documentation/devicetree/bindings/usb/usb251xb.txt6
-rw-r--r--Documentation/doc-guide/conf.py10
-rw-r--r--Documentation/driver-api/80211/conf.py10
-rw-r--r--Documentation/driver-api/conf.py10
-rw-r--r--Documentation/driver-api/generic-counter.rst4
-rw-r--r--Documentation/driver-api/phy/phy.rst4
-rw-r--r--Documentation/driver-api/pm/conf.py10
-rw-r--r--Documentation/filesystems/cifs/TODO26
-rw-r--r--Documentation/filesystems/conf.py10
-rw-r--r--Documentation/gpu/conf.py10
-rw-r--r--Documentation/hwmon/k8temp.rst2
-rw-r--r--Documentation/index.rst3
-rw-r--r--Documentation/input/conf.py10
-rw-r--r--Documentation/kernel-hacking/conf.py10
-rw-r--r--Documentation/locking/spinlocks.rst4
-rw-r--r--Documentation/maintainer/conf.py10
-rw-r--r--Documentation/media/conf.py12
-rw-r--r--Documentation/memory-barriers.txt2
-rw-r--r--Documentation/networking/conf.py10
-rw-r--r--Documentation/networking/tls-offload.rst41
-rw-r--r--Documentation/networking/tuntap.txt4
-rw-r--r--Documentation/power/index.rst2
-rw-r--r--Documentation/powerpc/bootwrapper.rst (renamed from Documentation/powerpc/bootwrapper.txt)28
-rw-r--r--Documentation/powerpc/cpu_families.rst (renamed from Documentation/powerpc/cpu_families.txt)23
-rw-r--r--Documentation/powerpc/cpu_features.rst (renamed from Documentation/powerpc/cpu_features.txt)6
-rw-r--r--Documentation/powerpc/cxl.rst (renamed from Documentation/powerpc/cxl.txt)46
-rw-r--r--Documentation/powerpc/cxlflash.rst (renamed from Documentation/powerpc/cxlflash.txt)10
-rw-r--r--Documentation/powerpc/dawr-power9.rst (renamed from Documentation/powerpc/DAWR-POWER9.txt)15
-rw-r--r--Documentation/powerpc/dscr.rst (renamed from Documentation/powerpc/dscr.txt)18
-rw-r--r--Documentation/powerpc/eeh-pci-error-recovery.rst (renamed from Documentation/powerpc/eeh-pci-error-recovery.txt)108
-rw-r--r--Documentation/powerpc/firmware-assisted-dump.rst (renamed from Documentation/powerpc/firmware-assisted-dump.txt)117
-rw-r--r--Documentation/powerpc/hvcs.rst (renamed from Documentation/powerpc/hvcs.txt)108
-rw-r--r--Documentation/powerpc/index.rst34
-rw-r--r--Documentation/powerpc/isa-versions.rst15
-rw-r--r--Documentation/powerpc/mpc52xx.rst (renamed from Documentation/powerpc/mpc52xx.txt)12
-rw-r--r--Documentation/powerpc/pci_iov_resource_on_powernv.rst (renamed from Documentation/powerpc/pci_iov_resource_on_powernv.txt)15
-rw-r--r--Documentation/powerpc/pmu-ebb.rst (renamed from Documentation/powerpc/pmu-ebb.txt)1
-rw-r--r--Documentation/powerpc/ptrace.rst156
-rw-r--r--Documentation/powerpc/ptrace.txt151
-rw-r--r--Documentation/powerpc/qe_firmware.rst (renamed from Documentation/powerpc/qe_firmware.txt)37
-rw-r--r--Documentation/powerpc/syscall64-abi.rst (renamed from Documentation/powerpc/syscall64-abi.txt)29
-rw-r--r--Documentation/powerpc/transactional_memory.rst (renamed from Documentation/powerpc/transactional_memory.txt)45
-rw-r--r--Documentation/process/conf.py10
-rw-r--r--Documentation/process/deprecated.rst14
-rw-r--r--Documentation/process/embargoed-hardware-issues.rst279
-rw-r--r--Documentation/process/index.rst1
-rw-r--r--Documentation/s390/vfio-ccw.rst31
-rw-r--r--Documentation/sh/conf.py10
-rw-r--r--Documentation/sound/conf.py10
-rw-r--r--Documentation/sphinx/load_config.py27
-rw-r--r--Documentation/translations/it_IT/doc-guide/sphinx.rst19
-rw-r--r--Documentation/translations/it_IT/process/index.rst1
-rw-r--r--Documentation/translations/it_IT/process/kernel-docs.rst11
-rw-r--r--Documentation/translations/it_IT/process/maintainer-pgp-guide.rst25
-rw-r--r--Documentation/translations/it_IT/process/programming-language.rst51
-rw-r--r--Documentation/translations/ko_KR/memory-barriers.txt2
-rw-r--r--Documentation/userspace-api/conf.py10
-rw-r--r--Documentation/virt/index.rst (renamed from Documentation/virtual/index.rst)0
-rw-r--r--Documentation/virt/kvm/amd-memory-encryption.rst (renamed from Documentation/virtual/kvm/amd-memory-encryption.rst)0
-rw-r--r--Documentation/virt/kvm/api.txt (renamed from Documentation/virtual/kvm/api.txt)2
-rw-r--r--Documentation/virt/kvm/arm/hyp-abi.txt (renamed from Documentation/virtual/kvm/arm/hyp-abi.txt)0
-rw-r--r--Documentation/virt/kvm/arm/psci.txt (renamed from Documentation/virtual/kvm/arm/psci.txt)0
-rw-r--r--Documentation/virt/kvm/cpuid.rst (renamed from Documentation/virtual/kvm/cpuid.rst)0
-rw-r--r--Documentation/virt/kvm/devices/README (renamed from Documentation/virtual/kvm/devices/README)0
-rw-r--r--Documentation/virt/kvm/devices/arm-vgic-its.txt (renamed from Documentation/virtual/kvm/devices/arm-vgic-its.txt)0
-rw-r--r--Documentation/virt/kvm/devices/arm-vgic-v3.txt (renamed from Documentation/virtual/kvm/devices/arm-vgic-v3.txt)0
-rw-r--r--Documentation/virt/kvm/devices/arm-vgic.txt (renamed from Documentation/virtual/kvm/devices/arm-vgic.txt)0
-rw-r--r--Documentation/virt/kvm/devices/mpic.txt (renamed from Documentation/virtual/kvm/devices/mpic.txt)0
-rw-r--r--Documentation/virt/kvm/devices/s390_flic.txt (renamed from Documentation/virtual/kvm/devices/s390_flic.txt)0
-rw-r--r--Documentation/virt/kvm/devices/vcpu.txt (renamed from Documentation/virtual/kvm/devices/vcpu.txt)0
-rw-r--r--Documentation/virt/kvm/devices/vfio.txt (renamed from Documentation/virtual/kvm/devices/vfio.txt)0
-rw-r--r--Documentation/virt/kvm/devices/vm.txt (renamed from Documentation/virtual/kvm/devices/vm.txt)0
-rw-r--r--Documentation/virt/kvm/devices/xics.txt (renamed from Documentation/virtual/kvm/devices/xics.txt)0
-rw-r--r--Documentation/virt/kvm/devices/xive.txt (renamed from Documentation/virtual/kvm/devices/xive.txt)0
-rw-r--r--Documentation/virt/kvm/halt-polling.txt (renamed from Documentation/virtual/kvm/halt-polling.txt)0
-rw-r--r--Documentation/virt/kvm/hypercalls.txt (renamed from Documentation/virtual/kvm/hypercalls.txt)4
-rw-r--r--Documentation/virt/kvm/index.rst (renamed from Documentation/virtual/kvm/index.rst)1
-rw-r--r--Documentation/virt/kvm/locking.txt (renamed from Documentation/virtual/kvm/locking.txt)0
-rw-r--r--Documentation/virt/kvm/mmu.txt (renamed from Documentation/virtual/kvm/mmu.txt)2
-rw-r--r--Documentation/virt/kvm/msr.txt (renamed from Documentation/virtual/kvm/msr.txt)0
-rw-r--r--Documentation/virt/kvm/nested-vmx.txt (renamed from Documentation/virtual/kvm/nested-vmx.txt)0
-rw-r--r--Documentation/virt/kvm/ppc-pv.txt (renamed from Documentation/virtual/kvm/ppc-pv.txt)0
-rw-r--r--Documentation/virt/kvm/review-checklist.txt (renamed from Documentation/virtual/kvm/review-checklist.txt)2
-rw-r--r--Documentation/virt/kvm/s390-diag.txt (renamed from Documentation/virtual/kvm/s390-diag.txt)0
-rw-r--r--Documentation/virt/kvm/timekeeping.txt (renamed from Documentation/virtual/kvm/timekeeping.txt)0
-rw-r--r--Documentation/virt/kvm/vcpu-requests.rst (renamed from Documentation/virtual/kvm/vcpu-requests.rst)0
-rw-r--r--Documentation/virt/paravirt_ops.rst (renamed from Documentation/virtual/paravirt_ops.rst)0
-rw-r--r--Documentation/virt/uml/UserModeLinux-HOWTO.txt (renamed from Documentation/virtual/uml/UserModeLinux-HOWTO.txt)0
-rw-r--r--Documentation/vm/conf.py10
-rw-r--r--Documentation/vm/hmm.rst2
-rw-r--r--Documentation/watchdog/hpwdt.rst2
-rw-r--r--Documentation/x86/conf.py10
-rw-r--r--MAINTAINERS142
-rw-r--r--Makefile28
-rw-r--r--arch/arc/boot/dts/Makefile3
-rw-r--r--arch/arc/include/asm/entry-arcv2.h2
-rw-r--r--arch/arc/include/asm/linkage.h8
-rw-r--r--arch/arc/include/asm/mach_desc.h3
-rw-r--r--arch/arc/kernel/mcip.c60
-rw-r--r--arch/arc/kernel/unwind.c5
-rw-r--r--arch/arc/mm/dma.c2
-rw-r--r--arch/arc/plat-hsdk/platform.c87
-rw-r--r--arch/arm/Kconfig4
-rw-r--r--arch/arm/Kconfig.debug5
-rw-r--r--arch/arm/boot/dts/am33xx-l4.dtsi16
-rw-r--r--arch/arm/boot/dts/am33xx.dtsi32
-rw-r--r--arch/arm/boot/dts/am4372.dtsi32
-rw-r--r--arch/arm/boot/dts/am437x-l4.dtsi4
-rw-r--r--arch/arm/boot/dts/am571x-idk.dts7
-rw-r--r--arch/arm/boot/dts/am572x-idk.dts7
-rw-r--r--arch/arm/boot/dts/am574x-idk.dts7
-rw-r--r--arch/arm/boot/dts/am57xx-beagle-x15-common.dtsi3
-rw-r--r--arch/arm/boot/dts/am57xx-beagle-x15-revb1.dts7
-rw-r--r--arch/arm/boot/dts/am57xx-beagle-x15-revc.dts7
-rw-r--r--arch/arm/boot/dts/bcm47094-linksys-panamera.dts3
-rw-r--r--arch/arm/boot/dts/dra7-evm.dts2
-rw-r--r--arch/arm/boot/dts/dra7-l4.dtsi6
-rw-r--r--arch/arm/boot/dts/dra74x-mmc-iodelay.dtsi50
-rw-r--r--arch/arm/boot/dts/imx6ul-14x14-evk.dtsi2
-rw-r--r--arch/arm/boot/dts/imx6ul-geam.dts2
-rw-r--r--arch/arm/boot/dts/imx6ul-isiot.dtsi2
-rw-r--r--arch/arm/boot/dts/imx6ul-pico-hobbit.dts2
-rw-r--r--arch/arm/boot/dts/imx6ul-pico-pi.dts4
-rw-r--r--arch/arm/boot/dts/imx7ulp.dtsi2
-rw-r--r--arch/arm/boot/dts/vf610-bk4.dts4
-rw-r--r--arch/arm/configs/u8500_defconfig34
-rw-r--r--arch/arm/include/asm/dma-mapping.h4
-rw-r--r--arch/arm/kernel/hw_breakpoint.c5
-rw-r--r--arch/arm/kernel/signal.c1
-rw-r--r--arch/arm/kvm/coproc.c23
-rw-r--r--arch/arm/lib/backtrace.S2
-rw-r--r--arch/arm/mach-davinci/sleep.S1
-rw-r--r--arch/arm/mach-ep93xx/crunch.c1
-rw-r--r--arch/arm/mach-netx/Kconfig22
-rw-r--r--arch/arm/mach-netx/Makefile13
-rw-r--r--arch/arm/mach-netx/Makefile.boot3
-rw-r--r--arch/arm/mach-netx/fb.c65
-rw-r--r--arch/arm/mach-netx/fb.h12
-rw-r--r--arch/arm/mach-netx/generic.c182
-rw-r--r--arch/arm/mach-netx/generic.h14
-rw-r--r--arch/arm/mach-netx/include/mach/hardware.h27
-rw-r--r--arch/arm/mach-netx/include/mach/irqs.h58
-rw-r--r--arch/arm/mach-netx/include/mach/netx-regs.h420
-rw-r--r--arch/arm/mach-netx/include/mach/pfifo.h42
-rw-r--r--arch/arm/mach-netx/include/mach/uncompress.h63
-rw-r--r--arch/arm/mach-netx/include/mach/xc.h30
-rw-r--r--arch/arm/mach-netx/nxdb500.c197
-rw-r--r--arch/arm/mach-netx/nxdkn.c90
-rw-r--r--arch/arm/mach-netx/nxeb500hmi.c174
-rw-r--r--arch/arm/mach-netx/pfifo.c56
-rw-r--r--arch/arm/mach-netx/time.c141
-rw-r--r--arch/arm/mach-netx/xc.c246
-rw-r--r--arch/arm/mach-omap1/ams-delta-fiq-handler.S3
-rw-r--r--arch/arm/mach-omap1/ams-delta-fiq.c4
-rw-r--r--arch/arm/mach-omap2/omap4-common.c3
-rw-r--r--arch/arm/mach-omap2/omap_hwmod_7xx_data.c3
-rw-r--r--arch/arm/mach-rpc/riscpc.c1
-rw-r--r--arch/arm/mach-tegra/reset.c2
-rw-r--r--arch/arm/mm/Kconfig1
-rw-r--r--arch/arm/mm/alignment.c4
-rw-r--r--arch/arm/mm/dma-mapping.c59
-rw-r--r--arch/arm/mm/init.c13
-rw-r--r--arch/arm/plat-omap/dma.c14
-rw-r--r--arch/arm64/Makefile2
-rw-r--r--arch/arm64/boot/dts/amlogic/meson-g12a-sei510.dts6
-rw-r--r--arch/arm64/boot/dts/amlogic/meson-g12a.dtsi1
-rw-r--r--arch/arm64/boot/dts/amlogic/meson-g12b-odroid-n2.dts1
-rw-r--r--arch/arm64/boot/dts/freescale/imx8mm-pinfunc.h4
-rw-r--r--arch/arm64/boot/dts/freescale/imx8mq.dtsi3
-rw-r--r--arch/arm64/include/asm/arch_gicv3.h6
-rw-r--r--arch/arm64/include/asm/cpufeature.h7
-rw-r--r--arch/arm64/include/asm/daifflags.h2
-rw-r--r--arch/arm64/include/asm/efi.h6
-rw-r--r--arch/arm64/include/asm/elf.h2
-rw-r--r--arch/arm64/include/asm/kvm_arm.h7
-rw-r--r--arch/arm64/include/asm/memory.h10
-rw-r--r--arch/arm64/include/asm/pgtable.h12
-rw-r--r--arch/arm64/include/asm/processor.h14
-rw-r--r--arch/arm64/include/asm/ptrace.h2
-rw-r--r--arch/arm64/include/asm/stacktrace.h78
-rw-r--r--arch/arm64/include/asm/vdso/compat_gettimeofday.h40
-rw-r--r--arch/arm64/include/uapi/asm/bpf_perf_event.h2
-rw-r--r--arch/arm64/kernel/cpufeature.c22
-rw-r--r--arch/arm64/kernel/debug-monitors.c14
-rw-r--r--arch/arm64/kernel/entry.S22
-rw-r--r--arch/arm64/kernel/fpsimd.c29
-rw-r--r--arch/arm64/kernel/ftrace.c22
-rw-r--r--arch/arm64/kernel/hw_breakpoint.c11
-rw-r--r--arch/arm64/kernel/module.c4
-rw-r--r--arch/arm64/kernel/perf_callchain.c7
-rw-r--r--arch/arm64/kernel/probes/kprobes.c40
-rw-r--r--arch/arm64/kernel/process.c36
-rw-r--r--arch/arm64/kernel/return_address.c12
-rw-r--r--arch/arm64/kernel/smp.c2
-rw-r--r--arch/arm64/kernel/stacktrace.c62
-rw-r--r--arch/arm64/kernel/time.c7
-rw-r--r--arch/arm64/kernel/traps.c14
-rw-r--r--arch/arm64/kernel/vdso/Makefile13
-rw-r--r--arch/arm64/kernel/vdso32/Makefile14
-rw-r--r--arch/arm64/kvm/hyp/debug-sr.c30
-rw-r--r--arch/arm64/kvm/regmap.c5
-rw-r--r--arch/arm64/kvm/sys_regs.c32
-rw-r--r--arch/arm64/mm/dma-mapping.c4
-rw-r--r--arch/arm64/mm/fault.c57
-rw-r--r--arch/csky/include/uapi/asm/byteorder.h2
-rw-r--r--arch/csky/include/uapi/asm/cachectl.h2
-rw-r--r--arch/csky/include/uapi/asm/perf_regs.h2
-rw-r--r--arch/csky/include/uapi/asm/ptrace.h2
-rw-r--r--arch/csky/include/uapi/asm/sigcontext.h2
-rw-r--r--arch/csky/include/uapi/asm/unistd.h2
-rw-r--r--arch/mips/cavium-octeon/octeon-usb.c1
-rw-r--r--arch/mips/include/asm/octeon/cvmx-sli-defs.h1
-rw-r--r--arch/mips/kernel/cacheinfo.c2
-rw-r--r--arch/mips/kernel/i8253.c3
-rw-r--r--arch/mips/kvm/emulate.c1
-rw-r--r--arch/mips/kvm/mips.c10
-rw-r--r--arch/mips/oprofile/op_model_mipsxx.c13
-rw-r--r--arch/mips/pci/ops-bcm63xx.c1
-rw-r--r--arch/mips/vdso/vdso.h1
-rw-r--r--arch/nds32/include/uapi/asm/auxvec.h2
-rw-r--r--arch/nds32/include/uapi/asm/byteorder.h2
-rw-r--r--arch/nds32/include/uapi/asm/cachectl.h2
-rw-r--r--arch/nds32/include/uapi/asm/fp_udfiex_crtl.h2
-rw-r--r--arch/nds32/include/uapi/asm/param.h2
-rw-r--r--arch/nds32/include/uapi/asm/ptrace.h2
-rw-r--r--arch/nds32/include/uapi/asm/sigcontext.h2
-rw-r--r--arch/nds32/include/uapi/asm/unistd.h2
-rw-r--r--arch/nds32/kernel/signal.c2
-rw-r--r--arch/parisc/Makefile5
-rw-r--r--arch/parisc/boot/compressed/Makefile4
-rw-r--r--arch/parisc/boot/compressed/vmlinux.lds.S4
-rw-r--r--arch/parisc/configs/defconfig (renamed from arch/parisc/configs/default_defconfig)0
-rw-r--r--arch/parisc/include/asm/kprobes.h4
-rw-r--r--arch/parisc/include/asm/pgtable.h3
-rw-r--r--arch/parisc/kernel/ftrace.c3
-rw-r--r--arch/parisc/kernel/pacache.S3
-rw-r--r--arch/parisc/math-emu/Makefile1
-rw-r--r--arch/parisc/mm/fault.c1
-rw-r--r--arch/powerpc/include/asm/cache.h8
-rw-r--r--arch/powerpc/include/asm/hvcall.h11
-rw-r--r--arch/powerpc/include/asm/pmc.h5
-rw-r--r--arch/powerpc/include/asm/unistd.h1
-rw-r--r--arch/powerpc/include/uapi/asm/bpf_perf_event.h2
-rw-r--r--arch/powerpc/include/uapi/asm/kvm_para.h2
-rw-r--r--arch/powerpc/kernel/align.c4
-rw-r--r--arch/powerpc/kernel/entry_32.S8
-rw-r--r--arch/powerpc/kernel/entry_64.S5
-rw-r--r--arch/powerpc/kernel/exceptions-64s.S2
-rw-r--r--arch/powerpc/kernel/signal_32.c3
-rw-r--r--arch/powerpc/kernel/signal_64.c5
-rw-r--r--arch/powerpc/kernel/syscalls/syscall.tbl2
-rw-r--r--arch/powerpc/kvm/book3s_32_mmu.c1
-rw-r--r--arch/powerpc/kvm/book3s_64_vio.c6
-rw-r--r--arch/powerpc/kvm/book3s_64_vio_hv.c6
-rw-r--r--arch/powerpc/kvm/book3s_hv.c13
-rw-r--r--arch/powerpc/kvm/book3s_xive.c4
-rw-r--r--arch/powerpc/kvm/book3s_xive_native.c4
-rw-r--r--arch/powerpc/kvm/powerpc.c15
-rw-r--r--arch/powerpc/mm/book3s64/hash_utils.c9
-rw-r--r--arch/powerpc/mm/kasan/kasan_init_32.c7
-rw-r--r--arch/powerpc/mm/mem.c2
-rw-r--r--arch/powerpc/platforms/pseries/papr_scm.c73
-rw-r--r--arch/powerpc/sysdev/xive/common.c7
-rw-r--r--arch/riscv/boot/dts/sifive/fu540-c000.dtsi16
-rw-r--r--arch/riscv/boot/dts/sifive/hifive-unleashed-a00.dts9
-rw-r--r--arch/riscv/configs/defconfig12
-rw-r--r--arch/riscv/configs/rv32_defconfig3
-rw-r--r--arch/riscv/include/asm/Kbuild1
-rw-r--r--arch/riscv/include/asm/fixmap.h4
-rw-r--r--arch/riscv/include/asm/pgtable.h12
-rw-r--r--arch/riscv/include/asm/switch_to.h8
-rw-r--r--arch/riscv/include/asm/tlbflush.h11
-rw-r--r--arch/riscv/include/uapi/asm/auxvec.h2
-rw-r--r--arch/riscv/include/uapi/asm/bitsperlong.h2
-rw-r--r--arch/riscv/include/uapi/asm/byteorder.h2
-rw-r--r--arch/riscv/include/uapi/asm/hwcap.h2
-rw-r--r--arch/riscv/include/uapi/asm/ptrace.h2
-rw-r--r--arch/riscv/include/uapi/asm/sigcontext.h2
-rw-r--r--arch/riscv/include/uapi/asm/ucontext.h2
-rw-r--r--arch/riscv/include/uapi/asm/unistd.h1
-rw-r--r--arch/riscv/kernel/process.c11
-rw-r--r--arch/riscv/kernel/vdso/Makefile2
-rw-r--r--arch/riscv/lib/Makefile2
-rw-r--r--arch/riscv/lib/delay.c6
-rw-r--r--arch/riscv/lib/udivdi3.S32
-rw-r--r--arch/s390/boot/Makefile2
-rw-r--r--arch/s390/boot/boot.h2
-rw-r--r--arch/s390/boot/head.S1
-rw-r--r--arch/s390/boot/ipl_parm.c2
-rw-r--r--arch/s390/boot/kaslr.c1
-rw-r--r--arch/s390/boot/version.c7
-rw-r--r--arch/s390/configs/debug_defconfig330
-rw-r--r--arch/s390/configs/defconfig233
-rw-r--r--arch/s390/configs/zfcpdump_defconfig31
-rw-r--r--arch/s390/hypfs/hypfs_vm.c4
-rw-r--r--arch/s390/include/asm/bitops.h73
-rw-r--r--arch/s390/include/asm/page.h2
-rw-r--r--arch/s390/include/asm/qdio.h10
-rw-r--r--arch/s390/include/asm/setup.h5
-rw-r--r--arch/s390/include/asm/unistd.h1
-rw-r--r--arch/s390/include/uapi/asm/bpf_perf_event.h2
-rw-r--r--arch/s390/include/uapi/asm/ipl.h2
-rw-r--r--arch/s390/include/uapi/asm/zcrypt.h35
-rw-r--r--arch/s390/kernel/dumpstack.c6
-rw-r--r--arch/s390/kernel/head64.S7
-rw-r--r--arch/s390/kernel/ipl.c9
-rw-r--r--arch/s390/kernel/machine_kexec_reloc.c1
-rw-r--r--arch/s390/kernel/perf_cpum_cf_diag.c2
-rw-r--r--arch/s390/kernel/setup.c3
-rw-r--r--arch/s390/kernel/syscalls/syscall.tbl2
-rw-r--r--arch/s390/kernel/vdso.c5
-rw-r--r--arch/s390/kernel/vmlinux.lds.S10
-rw-r--r--arch/s390/kvm/kvm-s390.c10
-rw-r--r--arch/s390/lib/xor.c1
-rw-r--r--arch/s390/mm/dump_pagetables.c12
-rw-r--r--arch/s390/mm/fault.c3
-rw-r--r--arch/s390/mm/gmap.c4
-rw-r--r--arch/s390/mm/pgalloc.c6
-rw-r--r--arch/s390/net/bpf_jit_comp.c12
-rw-r--r--arch/s390/scripts/Makefile.chkbss3
-rw-r--r--arch/sh/include/uapi/asm/setup.h2
-rw-r--r--arch/sh/include/uapi/asm/types.h2
-rw-r--r--arch/sh/kernel/disassemble.c5
-rw-r--r--arch/sh/kernel/hw_breakpoint.c1
-rw-r--r--arch/sparc/include/uapi/asm/oradax.h2
-rw-r--r--arch/um/include/shared/timer-internal.h14
-rw-r--r--arch/um/kernel/process.c2
-rw-r--r--arch/um/kernel/time.c16
-rw-r--r--arch/x86/Makefile1
-rw-r--r--arch/x86/boot/compressed/pgtable_64.c13
-rw-r--r--arch/x86/boot/string.c8
-rw-r--r--arch/x86/entry/calling.h17
-rw-r--r--arch/x86/entry/entry_32.S13
-rw-r--r--arch/x86/entry/entry_64.S21
-rw-r--r--arch/x86/events/amd/ibs.c13
-rw-r--r--arch/x86/events/core.c2
-rw-r--r--arch/x86/events/intel/core.c15
-rw-r--r--arch/x86/events/intel/ds.c2
-rw-r--r--arch/x86/include/asm/bootparam_utils.h63
-rw-r--r--arch/x86/include/asm/cpufeatures.h3
-rw-r--r--arch/x86/include/asm/ftrace.h1
-rw-r--r--arch/x86/include/asm/intel-family.h15
-rw-r--r--arch/x86/include/asm/kvm_host.h10
-rw-r--r--arch/x86/include/asm/msr-index.h1
-rw-r--r--arch/x86/include/asm/nospec-branch.h2
-rw-r--r--arch/x86/include/asm/perf_event.h12
-rw-r--r--arch/x86/include/asm/vdso/gettimeofday.h36
-rw-r--r--arch/x86/include/uapi/asm/byteorder.h2
-rw-r--r--arch/x86/include/uapi/asm/hwcap2.h2
-rw-r--r--arch/x86/include/uapi/asm/sigcontext32.h2
-rw-r--r--arch/x86/include/uapi/asm/types.h2
-rw-r--r--arch/x86/kernel/apic/apic.c72
-rw-r--r--arch/x86/kernel/apic/bigsmp_32.c24
-rw-r--r--arch/x86/kernel/apic/io_apic.c8
-rw-r--r--arch/x86/kernel/apic/probe_32.c3
-rw-r--r--arch/x86/kernel/cpu/amd.c66
-rw-r--r--arch/x86/kernel/cpu/bugs.c107
-rw-r--r--arch/x86/kernel/cpu/common.c44
-rw-r--r--arch/x86/kernel/cpu/mtrr/cyrix.c1
-rw-r--r--arch/x86/kernel/cpu/umwait.c39
-rw-r--r--arch/x86/kernel/head_64.S8
-rw-r--r--arch/x86/kernel/hpet.c12
-rw-r--r--arch/x86/kernel/kvm.c8
-rw-r--r--arch/x86/kernel/ptrace.c1
-rw-r--r--arch/x86/kernel/stacktrace.c2
-rw-r--r--arch/x86/kernel/sysfb_efi.c46
-rw-r--r--arch/x86/kernel/uprobes.c17
-rw-r--r--arch/x86/kvm/debugfs.c46
-rw-r--r--arch/x86/kvm/hyperv.c5
-rw-r--r--arch/x86/kvm/lapic.c13
-rw-r--r--arch/x86/kvm/mmu.c35
-rw-r--r--arch/x86/kvm/svm.c28
-rw-r--r--arch/x86/kvm/vmx/nested.c4
-rw-r--r--arch/x86/kvm/vmx/vmx.c20
-rw-r--r--arch/x86/kvm/x86.c41
-rw-r--r--arch/x86/lib/cpu.c1
-rw-r--r--arch/x86/math-emu/errors.c5
-rw-r--r--arch/x86/math-emu/fpu_trig.c2
-rw-r--r--arch/x86/mm/fault.c15
-rw-r--r--arch/x86/mm/pageattr.c26
-rw-r--r--arch/x86/net/bpf_jit_comp.c9
-rw-r--r--arch/x86/power/cpu.c86
-rw-r--r--arch/x86/purgatory/Makefile36
-rw-r--r--arch/x86/purgatory/purgatory.c6
-rw-r--r--arch/x86/purgatory/string.c23
-rw-r--r--arch/xtensa/kernel/coprocessor.S1
-rw-r--r--arch/xtensa/kernel/setup.c1
-rw-r--r--block/bfq-iosched.c135
-rw-r--r--block/blk-cgroup.c9
-rw-r--r--block/blk-iolatency.c3
-rw-r--r--block/blk-mq-sched.h9
-rw-r--r--block/blk-mq.c4
-rw-r--r--block/blk-rq-qos.c7
-rw-r--r--block/blk-settings.c3
-rw-r--r--block/blk-sysfs.c3
-rw-r--r--block/genhd.c2
-rw-r--r--drivers/acpi/arm64/iort.c4
-rw-r--r--drivers/acpi/device_pm.c4
-rw-r--r--drivers/acpi/nfit/core.c28
-rw-r--r--drivers/acpi/nfit/nfit.h24
-rw-r--r--drivers/acpi/scan.c6
-rw-r--r--drivers/android/binder.c5
-rw-r--r--drivers/ata/libahci_platform.c4
-rw-r--r--drivers/ata/libata-scsi.c21
-rw-r--r--drivers/ata/libata-sff.c6
-rw-r--r--drivers/ata/libata-zpodd.c2
-rw-r--r--drivers/ata/pata_rb532_cf.c1
-rw-r--r--drivers/atm/Kconfig2
-rw-r--r--drivers/atm/iphase.c8
-rw-r--r--drivers/auxdisplay/Kconfig5
-rw-r--r--drivers/auxdisplay/charlcd.c2
-rw-r--r--drivers/auxdisplay/charlcd.h (renamed from include/misc/charlcd.h)5
-rw-r--r--drivers/auxdisplay/hd44780.c3
-rw-r--r--drivers/auxdisplay/ht16k33.c4
-rw-r--r--drivers/auxdisplay/panel.c4
-rw-r--r--drivers/base/core.c83
-rw-r--r--drivers/base/firmware_loader/firmware.h4
-rw-r--r--drivers/base/platform.c9
-rw-r--r--drivers/base/regmap/Kconfig2
-rw-r--r--drivers/block/aoe/aoedev.c13
-rw-r--r--drivers/block/ataflop.c1
-rw-r--r--drivers/block/drbd/drbd_main.c2
-rw-r--r--drivers/block/drbd/drbd_receiver.c14
-rw-r--r--drivers/block/loop.c18
-rw-r--r--drivers/block/nbd.c2
-rw-r--r--drivers/block/rbd.c11
-rw-r--r--drivers/block/xen-blkback/xenbus.c6
-rw-r--r--drivers/bluetooth/btqca.c29
-rw-r--r--drivers/bluetooth/btqca.h7
-rw-r--r--drivers/bluetooth/btusb.c4
-rw-r--r--drivers/bluetooth/hci_ath.c3
-rw-r--r--drivers/bluetooth/hci_bcm.c3
-rw-r--r--drivers/bluetooth/hci_intel.c3
-rw-r--r--drivers/bluetooth/hci_ldisc.c13
-rw-r--r--drivers/bluetooth/hci_mrvl.c3
-rw-r--r--drivers/bluetooth/hci_qca.c12
-rw-r--r--drivers/bluetooth/hci_uart.h1
-rw-r--r--drivers/bus/hisi_lpc.c47
-rw-r--r--drivers/bus/ti-sysc.c24
-rw-r--r--drivers/char/hpet.c3
-rw-r--r--drivers/char/ipmi/ipmb_dev_int.c2
-rw-r--r--drivers/char/tpm/tpm-chip.c43
-rw-r--r--drivers/char/tpm/tpm.h2
-rw-r--r--drivers/char/tpm/tpm1-cmd.c36
-rw-r--r--drivers/char/tpm/tpm2-cmd.c6
-rw-r--r--drivers/clk/at91/clk-generated.c2
-rw-r--r--drivers/clk/clk.c49
-rw-r--r--drivers/clk/mediatek/clk-mt8183.c46
-rw-r--r--drivers/clk/renesas/renesas-cpg-mssr.c16
-rw-r--r--drivers/clk/samsung/clk-exynos5-subcmu.c16
-rw-r--r--drivers/clk/samsung/clk-exynos5-subcmu.h2
-rw-r--r--drivers/clk/samsung/clk-exynos5250.c7
-rw-r--r--drivers/clk/samsung/clk-exynos5420.c162
-rw-r--r--drivers/clk/socfpga/clk-periph-s10.c2
-rw-r--r--drivers/clk/sprd/Kconfig1
-rw-r--r--drivers/clocksource/timer-riscv.c6
-rw-r--r--drivers/connector/connector.c6
-rw-r--r--drivers/cpufreq/cpufreq.c2
-rw-r--r--drivers/cpufreq/pasemi-cpufreq.c23
-rw-r--r--drivers/crypto/ccp/ccp-crypto-aes-galois.c14
-rw-r--r--drivers/crypto/ccp/ccp-dev.c8
-rw-r--r--drivers/crypto/ccp/ccp-ops.c33
-rw-r--r--drivers/crypto/ux500/cryp/cryp.c6
-rw-r--r--drivers/dma/dw-edma/dw-edma-core.h2
-rw-r--r--drivers/dma/dw-edma/dw-edma-pcie.c18
-rw-r--r--drivers/dma/dw-edma/dw-edma-v0-core.c34
-rw-r--r--drivers/dma/dw-edma/dw-edma-v0-debugfs.c29
-rw-r--r--drivers/dma/fsldma.c1
-rw-r--r--drivers/dma/ste_dma40.c4
-rw-r--r--drivers/dma/stm32-mdma.c2
-rw-r--r--drivers/dma/tegra210-adma.c4
-rw-r--r--drivers/dma/ti/omap-dma.c4
-rw-r--r--drivers/firewire/core-device.c2
-rw-r--r--drivers/firewire/core-iso.c2
-rw-r--r--drivers/firewire/core-topology.c1
-rw-r--r--drivers/firmware/Kconfig5
-rw-r--r--drivers/firmware/efi/libstub/efi-stub-helper.c38
-rw-r--r--drivers/firmware/iscsi_ibft.c4
-rw-r--r--drivers/fpga/Kconfig1
-rw-r--r--drivers/fpga/altera-ps-spi.c11
-rw-r--r--drivers/fsi/fsi-scom.c8
-rw-r--r--drivers/gpio/gpiolib-of.c2
-rw-r--r--drivers/gpio/gpiolib.c59
-rw-r--r--drivers/gpu/drm/Kconfig2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c35
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c27
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_gds.h1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c78
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c19
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c22
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c9
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c49
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c29
-rw-r--r--drivers/gpu/drm/amd/amdgpu/nv.c1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/soc15.c5
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c47
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_chardev.c28
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_crat.c1
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c1
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c16
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dce110/dce110_clk_mgr.c4
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dce112/dce112_clk_mgr.c4
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dce120/dce120_clk_mgr.c4
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c3
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc.c17
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_link.c30
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c39
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_resource.c11
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_stream.c3
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_abm.c4
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c24
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c21
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dccg.c5
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubbub.c18
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c22
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c4
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn20/dcn20_vmid.c37
-rw-r--r--drivers/gpu/drm/amd/display/dc/dsc/drm_dsc_dc.c6
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/core_types.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h4
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h1
-rw-r--r--drivers/gpu/drm/amd/display/include/dpcd_defs.h2
-rw-r--r--drivers/gpu/drm/amd/include/kgd_pp_interface.h1
-rw-r--r--drivers/gpu/drm/amd/powerplay/amdgpu_smu.c125
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c9
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c66
-rw-r--r--drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h14
-rw-r--r--drivers/gpu/drm/amd/powerplay/navi10_ppt.c211
-rw-r--r--drivers/gpu/drm/amd/powerplay/navi10_ppt.h4
-rw-r--r--drivers/gpu/drm/amd/powerplay/smu_v11_0.c62
-rw-r--r--drivers/gpu/drm/amd/powerplay/vega20_ppt.c65
-rw-r--r--drivers/gpu/drm/arm/display/komeda/komeda_dev.c11
-rw-r--r--drivers/gpu/drm/arm/display/komeda/komeda_format_caps.c19
-rw-r--r--drivers/gpu/drm/arm/display/komeda/komeda_format_caps.h3
-rw-r--r--drivers/gpu/drm/arm/display/komeda/komeda_framebuffer.c5
-rw-r--r--drivers/gpu/drm/arm/display/komeda/komeda_kms.c30
-rw-r--r--drivers/gpu/drm/arm/display/komeda/komeda_pipeline.h1
-rw-r--r--drivers/gpu/drm/arm/display/komeda/komeda_wb_connector.c2
-rw-r--r--drivers/gpu/drm/ast/ast_main.c5
-rw-r--r--drivers/gpu/drm/ast/ast_mode.c2
-rw-r--r--drivers/gpu/drm/ast/ast_post.c2
-rw-r--r--drivers/gpu/drm/bochs/bochs_kms.c1
-rw-r--r--drivers/gpu/drm/bridge/Kconfig4
-rw-r--r--drivers/gpu/drm/drm_client.c60
-rw-r--r--drivers/gpu/drm/drm_client_modeset.c2
-rw-r--r--drivers/gpu/drm/drm_fb_helper.c51
-rw-r--r--drivers/gpu/drm/drm_framebuffer.c2
-rw-r--r--drivers/gpu/drm/drm_modes.c6
-rw-r--r--drivers/gpu/drm/exynos/Kconfig1
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_fimc.c2
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_g2d.c2
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_gsc.c2
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_scaler.c4
-rw-r--r--drivers/gpu/drm/i915/Makefile1
-rw-r--r--drivers/gpu/drm/i915/display/intel_bios.c2
-rw-r--r--drivers/gpu/drm/i915/display/intel_bw.c15
-rw-r--r--drivers/gpu/drm/i915/display/intel_cdclk.c11
-rw-r--r--drivers/gpu/drm/i915/display/intel_ddi.c4
-rw-r--r--drivers/gpu/drm/i915/display/intel_display.c6
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_power.c11
-rw-r--r--drivers/gpu/drm/i915/display/intel_dp.c1
-rw-r--r--drivers/gpu/drm/i915/display/intel_dp_mst.c10
-rw-r--r--drivers/gpu/drm/i915/display/intel_hdcp.c3
-rw-r--r--drivers/gpu/drm/i915/display/intel_vbt_defs.h6
-rw-r--r--drivers/gpu/drm/i915/display/intel_vdsc.c2
-rw-r--r--drivers/gpu/drm/i915/display/vlv_dsi_pll.c4
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c20
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_mman.c2
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_pages.c2
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_pm.c7
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_userptr.c10
-rw-r--r--drivers/gpu/drm/i915/gt/intel_context.c27
-rw-r--r--drivers/gpu/drm/i915/gt/intel_engine_cs.c20
-rw-r--r--drivers/gpu/drm/i915/gt/intel_engine_pm.c24
-rw-r--r--drivers/gpu/drm/i915/gt/intel_engine_pm.h12
-rw-r--r--drivers/gpu/drm/i915/gt/intel_engine_types.h12
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt_pm.c21
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt_pm.h2
-rw-r--r--drivers/gpu/drm/i915/gt/intel_lrc.c10
-rw-r--r--drivers/gpu/drm/i915/gt/intel_reset.c58
-rw-r--r--drivers/gpu/drm/i915/gt/intel_ringbuffer.c31
-rw-r--r--drivers/gpu/drm/i915/gt/intel_workarounds.c38
-rw-r--r--drivers/gpu/drm/i915/gt/mock_engine.c1
-rw-r--r--drivers/gpu/drm/i915/gt/selftest_reset.c5
-rw-r--r--drivers/gpu/drm/i915/gt/selftest_workarounds.c7
-rw-r--r--drivers/gpu/drm/i915/gvt/cmd_parser.c10
-rw-r--r--drivers/gpu/drm/i915/gvt/fb_decoder.c6
-rw-r--r--drivers/gpu/drm/i915/gvt/gtt.c9
-rw-r--r--drivers/gpu/drm/i915/gvt/kvmgt.c12
-rw-r--r--drivers/gpu/drm/i915/gvt/scheduler.c59
-rw-r--r--drivers/gpu/drm/i915/gvt/trace_points.c2
-rw-r--r--drivers/gpu/drm/i915/i915_drv.c6
-rw-r--r--drivers/gpu/drm/i915/i915_drv.h5
-rw-r--r--drivers/gpu/drm/i915/i915_gem.c25
-rw-r--r--drivers/gpu/drm/i915/i915_gem_gtt.c8
-rw-r--r--drivers/gpu/drm/i915/i915_gpu_error.c7
-rw-r--r--drivers/gpu/drm/i915/i915_perf.c67
-rw-r--r--drivers/gpu/drm/i915/i915_trace.h76
-rw-r--r--drivers/gpu/drm/i915/i915_vgpu.c3
-rw-r--r--drivers/gpu/drm/i915/intel_drv.h2
-rw-r--r--drivers/gpu/drm/i915/intel_runtime_pm.c10
-rw-r--r--drivers/gpu/drm/i915/intel_wakeref.h15
-rw-r--r--drivers/gpu/drm/mediatek/mtk_drm_drv.c55
-rw-r--r--drivers/gpu/drm/mediatek/mtk_drm_drv.h2
-rw-r--r--drivers/gpu/drm/msm/adreno/a5xx_gpu.c2
-rw-r--r--drivers/gpu/drm/msm/adreno/a6xx_gpu.c1
-rw-r--r--drivers/gpu/drm/msm/adreno/adreno_gpu.c1
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c3
-rw-r--r--drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c16
-rw-r--r--drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c2
-rw-r--r--drivers/gpu/drm/msm/msm_drv.c5
-rw-r--r--drivers/gpu/drm/msm/msm_gem.c47
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/disp.c22
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_svm.c47
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/i2c/aux.c24
-rw-r--r--drivers/gpu/drm/omapdrm/dss/output.c4
-rw-r--r--drivers/gpu/drm/omapdrm/omap_drv.c2
-rw-r--r--drivers/gpu/drm/qxl/qxl_drv.c20
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_lvds.c6
-rw-r--r--drivers/gpu/drm/rockchip/analogix_dp-rockchip.c2
-rw-r--r--drivers/gpu/drm/scheduler/sched_entity.c4
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_tcon.c1
-rw-r--r--drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c1
-rw-r--r--drivers/gpu/drm/tegra/output.c8
-rw-r--r--drivers/gpu/drm/ttm/ttm_page_alloc_dma.c6
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_object.c10
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_msg.c4
-rw-r--r--drivers/hid/hid-a4tech.c30
-rw-r--r--drivers/hid/hid-cp2112.c8
-rw-r--r--drivers/hid/hid-holtek-kbd.c9
-rw-r--r--drivers/hid/hid-ids.h5
-rw-r--r--drivers/hid/hid-logitech-dj.c10
-rw-r--r--drivers/hid/hid-logitech-hidpp.c14
-rw-r--r--drivers/hid/hid-quirks.c2
-rw-r--r--drivers/hid/hid-sony.c15
-rw-r--r--drivers/hid/hid-tmff.c12
-rw-r--r--drivers/hid/intel-ish-hid/ipc/hw-ish.h1
-rw-r--r--drivers/hid/intel-ish-hid/ipc/pci-ish.c1
-rw-r--r--drivers/hid/usbhid/hiddev.c12
-rw-r--r--drivers/hid/wacom_wac.c19
-rw-r--r--drivers/hv/channel.c2
-rw-r--r--drivers/hv/hv_trace.h2
-rw-r--r--drivers/hv/hyperv_vmbus.h2
-rw-r--r--drivers/hwmon/lm75.c2
-rw-r--r--drivers/hwmon/nct6775.c3
-rw-r--r--drivers/hwmon/nct7802.c6
-rw-r--r--drivers/hwmon/occ/common.c6
-rw-r--r--drivers/hwtracing/coresight/coresight-etm-perf.c1
-rw-r--r--drivers/hwtracing/intel_th/msu.h2
-rw-r--r--drivers/hwtracing/intel_th/pci.c10
-rw-r--r--drivers/hwtracing/intel_th/pti.h2
-rw-r--r--drivers/hwtracing/stm/core.c1
-rw-r--r--drivers/i2c/busses/i2c-at91-core.c2
-rw-r--r--drivers/i2c/busses/i2c-at91-master.c9
-rw-r--r--drivers/i2c/busses/i2c-bcm-iproc.c15
-rw-r--r--drivers/i2c/busses/i2c-designware-slave.c1
-rw-r--r--drivers/i2c/busses/i2c-emev2.c16
-rw-r--r--drivers/i2c/busses/i2c-i801.c15
-rw-r--r--drivers/i2c/busses/i2c-imx.c18
-rw-r--r--drivers/i2c/busses/i2c-mt65xx.c11
-rw-r--r--drivers/i2c/busses/i2c-nvidia-gpu.c2
-rw-r--r--drivers/i2c/busses/i2c-piix4.c12
-rw-r--r--drivers/i2c/busses/i2c-rcar.c11
-rw-r--r--drivers/i2c/busses/i2c-s3c2410.c1
-rw-r--r--drivers/i2c/busses/i2c-stm32.h2
-rw-r--r--drivers/i2c/i2c-core-base.c2
-rw-r--r--drivers/iio/accel/cros_ec_accel_legacy.c1
-rw-r--r--drivers/iio/adc/ingenic-adc.c54
-rw-r--r--drivers/iio/adc/max9611.c4
-rw-r--r--drivers/iio/adc/rcar-gyroadc.c4
-rw-r--r--drivers/iio/frequency/adf4371.c8
-rw-r--r--drivers/iio/imu/inv_mpu6050/inv_mpu_core.c43
-rw-r--r--drivers/infiniband/core/cma.c6
-rw-r--r--drivers/infiniband/core/core_priv.h5
-rw-r--r--drivers/infiniband/core/counters.c27
-rw-r--r--drivers/infiniband/core/device.c102
-rw-r--r--drivers/infiniband/core/mad.c20
-rw-r--r--drivers/infiniband/core/nldev.c11
-rw-r--r--drivers/infiniband/core/restrack.c15
-rw-r--r--drivers/infiniband/core/umem.c7
-rw-r--r--drivers/infiniband/core/umem_odp.c4
-rw-r--r--drivers/infiniband/core/user_mad.c6
-rw-r--r--drivers/infiniband/hw/bnxt_re/ib_verbs.c7
-rw-r--r--drivers/infiniband/hw/bnxt_re/qplib_rcfw.c8
-rw-r--r--drivers/infiniband/hw/bnxt_re/qplib_rcfw.h11
-rw-r--r--drivers/infiniband/hw/bnxt_re/qplib_res.c13
-rw-r--r--drivers/infiniband/hw/bnxt_re/qplib_res.h2
-rw-r--r--drivers/infiniband/hw/bnxt_re/qplib_sp.c14
-rw-r--r--drivers/infiniband/hw/bnxt_re/qplib_sp.h7
-rw-r--r--drivers/infiniband/hw/hfi1/chip.c11
-rw-r--r--drivers/infiniband/hw/hfi1/fault.c12
-rw-r--r--drivers/infiniband/hw/hfi1/rc.c2
-rw-r--r--drivers/infiniband/hw/hfi1/tid_rdma.c119
-rw-r--r--drivers/infiniband/hw/hfi1/verbs.c2
-rw-r--r--drivers/infiniband/hw/hns/Kconfig6
-rw-r--r--drivers/infiniband/hw/hns/Makefile8
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_db.c15
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_hw_v1.c4
-rw-r--r--drivers/infiniband/hw/mlx4/mad.c4
-rw-r--r--drivers/infiniband/hw/mlx5/devx.c11
-rw-r--r--drivers/infiniband/hw/mlx5/main.c13
-rw-r--r--drivers/infiniband/hw/mlx5/mem.c5
-rw-r--r--drivers/infiniband/hw/mlx5/mlx5_ib.h15
-rw-r--r--drivers/infiniband/hw/mlx5/mr.c51
-rw-r--r--drivers/infiniband/hw/mlx5/odp.c48
-rw-r--r--drivers/infiniband/hw/mlx5/qp.c37
-rw-r--r--drivers/infiniband/hw/qedr/main.c10
-rw-r--r--drivers/infiniband/sw/siw/Kconfig2
-rw-r--r--drivers/infiniband/sw/siw/siw.h10
-rw-r--r--drivers/infiniband/sw/siw/siw_cm.c116
-rw-r--r--drivers/infiniband/sw/siw/siw_cq.c5
-rw-r--r--drivers/infiniband/sw/siw/siw_main.c5
-rw-r--r--drivers/infiniband/sw/siw/siw_mem.c14
-rw-r--r--drivers/infiniband/sw/siw/siw_mem.h2
-rw-r--r--drivers/infiniband/sw/siw/siw_qp.c22
-rw-r--r--drivers/infiniband/sw/siw/siw_qp_rx.c26
-rw-r--r--drivers/infiniband/sw/siw/siw_qp_tx.c80
-rw-r--r--drivers/infiniband/sw/siw/siw_verbs.c56
-rw-r--r--drivers/input/joystick/iforce/iforce-usb.c5
-rw-r--r--drivers/input/keyboard/Kconfig2
-rw-r--r--drivers/input/keyboard/applespi.c29
-rw-r--r--drivers/input/mouse/elantech.c55
-rw-r--r--drivers/input/mouse/synaptics.c1
-rw-r--r--drivers/input/serio/hyperv-keyboard.c35
-rw-r--r--drivers/input/tablet/kbtab.c6
-rw-r--r--drivers/input/touchscreen/usbtouchscreen.c2
-rw-r--r--drivers/iommu/amd_iommu_init.c90
-rw-r--r--drivers/iommu/amd_iommu_types.h9
-rw-r--r--drivers/iommu/arm-smmu-v3.c4
-rw-r--r--drivers/iommu/dma-iommu.c28
-rw-r--r--drivers/iommu/intel-iommu-debugfs.c6
-rw-r--r--drivers/iommu/intel-iommu.c200
-rw-r--r--drivers/iommu/iova.c23
-rw-r--r--drivers/iommu/virtio-iommu.c40
-rw-r--r--drivers/irqchip/irq-gic-v3-its.c2
-rw-r--r--drivers/irqchip/irq-gic-v3.c4
-rw-r--r--drivers/irqchip/irq-imx-gpcv2.c1
-rw-r--r--drivers/irqchip/irq-mbigen.c9
-rw-r--r--drivers/isdn/hardware/mISDN/hfcsusb.c13
-rw-r--r--drivers/macintosh/smu.c1
-rw-r--r--drivers/md/bcache/super.c3
-rw-r--r--drivers/md/bcache/sysfs.c20
-rw-r--r--drivers/md/dm-bufio.c4
-rw-r--r--drivers/md/dm-dust.c11
-rw-r--r--drivers/md/dm-integrity.c15
-rw-r--r--drivers/md/dm-kcopyd.c5
-rw-r--r--drivers/md/dm-raid.c2
-rw-r--r--drivers/md/dm-table.c21
-rw-r--r--drivers/md/dm-zoned-metadata.c68
-rw-r--r--drivers/md/dm-zoned-reclaim.c47
-rw-r--r--drivers/md/dm-zoned-target.c68
-rw-r--r--drivers/md/dm-zoned.h11
-rw-r--r--drivers/md/persistent-data/dm-btree.c31
-rw-r--r--drivers/md/persistent-data/dm-space-map-metadata.c2
-rw-r--r--drivers/media/platform/omap/omap_vout_vrfb.c3
-rw-r--r--drivers/media/platform/vivid/vivid-core.c8
-rw-r--r--drivers/media/v4l2-core/v4l2-subdev.c2
-rw-r--r--drivers/mfd/db8500-prcmu.c2
-rw-r--r--drivers/mfd/omap-usb-host.c4
-rw-r--r--drivers/mfd/rk808.c6
-rw-r--r--drivers/misc/Kconfig1
-rw-r--r--drivers/misc/eeprom/Kconfig3
-rw-r--r--drivers/misc/eeprom/at24.c2
-rw-r--r--drivers/misc/habanalabs/command_submission.c2
-rw-r--r--drivers/misc/habanalabs/device.c5
-rw-r--r--drivers/misc/habanalabs/firmware_if.c22
-rw-r--r--drivers/misc/habanalabs/goya/goya.c83
-rw-r--r--drivers/misc/habanalabs/goya/goyaP.h2
-rw-r--r--drivers/misc/habanalabs/habanalabs.h25
-rw-r--r--drivers/misc/habanalabs/hw_queue.c14
-rw-r--r--drivers/misc/habanalabs/include/goya/goya_packets.h13
-rw-r--r--drivers/misc/habanalabs/irq.c27
-rw-r--r--drivers/misc/habanalabs/memory.c2
-rw-r--r--drivers/misc/lkdtm/bugs.c4
-rw-r--r--drivers/misc/mei/hw-me-regs.h5
-rw-r--r--drivers/misc/mei/pci-me.c5
-rw-r--r--drivers/misc/vmw_balloon.c10
-rw-r--r--drivers/misc/vmw_vmci/vmci_doorbell.c6
-rw-r--r--drivers/mmc/core/queue.c5
-rw-r--r--drivers/mmc/core/sd.c6
-rw-r--r--drivers/mmc/host/cavium.c4
-rw-r--r--drivers/mmc/host/dw_mmc.c3
-rw-r--r--drivers/mmc/host/meson-mx-sdio.c2
-rw-r--r--drivers/mmc/host/sdhci-acpi.c2
-rw-r--r--drivers/mmc/host/sdhci-cadence.c1
-rw-r--r--drivers/mmc/host/sdhci-esdhc-imx.c2
-rw-r--r--drivers/mmc/host/sdhci-of-at91.c5
-rw-r--r--drivers/mmc/host/sdhci-pci-core.c4
-rw-r--r--drivers/mmc/host/sdhci-pxav3.c2
-rw-r--r--drivers/mmc/host/sdhci-s3c.c2
-rw-r--r--drivers/mmc/host/sdhci-sprd.c33
-rw-r--r--drivers/mmc/host/sdhci-tegra.c14
-rw-r--r--drivers/mmc/host/sdhci-xenon.c2
-rw-r--r--drivers/mmc/host/sdhci.c4
-rw-r--r--drivers/mmc/host/sdhci.h2
-rw-r--r--drivers/mtd/hyperbus/Kconfig4
-rw-r--r--drivers/mtd/maps/sa1100-flash.c1
-rw-r--r--drivers/mtd/nand/onenand/onenand_base.c1
-rw-r--r--drivers/mtd/nand/raw/nand_micron.c14
-rw-r--r--drivers/mtd/spi-nor/spi-nor.c5
-rw-r--r--drivers/net/arcnet/arc-rimi.c3
-rw-r--r--drivers/net/arcnet/com20020-isa.c6
-rw-r--r--drivers/net/arcnet/com90io.c2
-rw-r--r--drivers/net/arcnet/com90xx.c3
-rw-r--r--drivers/net/bonding/bond_main.c11
-rw-r--r--drivers/net/can/at91_can.c6
-rw-r--r--drivers/net/can/dev.c2
-rw-r--r--drivers/net/can/flexcan.c39
-rw-r--r--drivers/net/can/peak_canfd/peak_pciefd_main.c2
-rw-r--r--drivers/net/can/rcar/rcar_canfd.c9
-rw-r--r--drivers/net/can/sja1000/peak_pcmcia.c2
-rw-r--r--drivers/net/can/spi/mcp251x.c52
-rw-r--r--drivers/net/can/usb/peak_usb/pcan_usb.c2
-rw-r--r--drivers/net/can/usb/peak_usb/pcan_usb_core.c10
-rw-r--r--drivers/net/can/usb/peak_usb/pcan_usb_fd.c2
-rw-r--r--drivers/net/can/usb/peak_usb/pcan_usb_pro.c2
-rw-r--r--drivers/net/dsa/bcm_sf2.c10
-rw-r--r--drivers/net/dsa/microchip/ksz9477_spi.c1
-rw-r--r--drivers/net/dsa/microchip/ksz_common.h1
-rw-r--r--drivers/net/dsa/mv88e6xxx/chip.c29
-rw-r--r--drivers/net/dsa/qca8k.c10
-rw-r--r--drivers/net/dsa/sja1105/sja1105_dynamic_config.c14
-rw-r--r--drivers/net/dsa/sja1105/sja1105_main.c147
-rw-r--r--drivers/net/dsa/sja1105/sja1105_ptp.c7
-rw-r--r--drivers/net/ethernet/8390/Kconfig4
-rw-r--r--drivers/net/ethernet/agere/et131x.c2
-rw-r--r--drivers/net/ethernet/allwinner/sun4i-emac.c4
-rw-r--r--drivers/net/ethernet/amd/Kconfig2
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-main.c10
-rw-r--r--drivers/net/ethernet/apple/Kconfig4
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_filters.c5
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_main.c4
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_nic.c2
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_vec.c3
-rw-r--r--drivers/net/ethernet/atheros/ag71xx.c2
-rw-r--r--drivers/net/ethernet/broadcom/Kconfig6
-rw-r--r--drivers/net/ethernet/broadcom/bcmsysport.c2
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c13
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h2
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c17
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt.c38
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c9
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c12
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c8
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_tc.h6
-rw-r--r--drivers/net/ethernet/broadcom/genet/bcmgenet.c3
-rw-r--r--drivers/net/ethernet/cadence/macb_main.c2
-rw-r--r--drivers/net/ethernet/cavium/common/cavium_ptp.c2
-rw-r--r--drivers/net/ethernet/cavium/liquidio/request_manager.c4
-rw-r--r--drivers/net/ethernet/cavium/thunder/thunder_bgx.c18
-rw-r--r--drivers/net/ethernet/chelsio/cxgb/my3126.c4
-rw-r--r--drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c5
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c4
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c6
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c3
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4_hw.c9
-rw-r--r--drivers/net/ethernet/emulex/benet/be_cmds.c6
-rw-r--r--drivers/net/ethernet/emulex/benet/be_main.c7
-rw-r--r--drivers/net/ethernet/ezchip/nps_enet.h4
-rw-r--r--drivers/net/ethernet/freescale/enetc/Kconfig2
-rw-r--r--drivers/net/ethernet/freescale/enetc/enetc_ptp.c5
-rw-r--r--drivers/net/ethernet/freescale/fman/fman.c3
-rw-r--r--drivers/net/ethernet/google/gve/gve.h8
-rw-r--r--drivers/net/ethernet/google/gve/gve_ethtool.c4
-rw-r--r--drivers/net/ethernet/google/gve/gve_main.c2
-rw-r--r--drivers/net/ethernet/google/gve/gve_rx.c34
-rw-r--r--drivers/net/ethernet/hisilicon/hip04_eth.c28
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h2
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c2
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c4
-rw-r--r--drivers/net/ethernet/ibm/ehea/ehea_main.c2
-rw-r--r--drivers/net/ethernet/ibm/ibmveth.c9
-rw-r--r--drivers/net/ethernet/ibm/ibmvnic.c17
-rw-r--r--drivers/net/ethernet/intel/igc/igc_main.c12
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_main.c5
-rw-r--r--drivers/net/ethernet/marvell/mvmdio.c31
-rw-r--r--drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c87
-rw-r--r--drivers/net/ethernet/marvell/sky2.c14
-rw-r--r--drivers/net/ethernet/mediatek/Kconfig1
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_rx.c3
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/dev.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en.h23
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/params.h5
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/port.c27
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/port.h6
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c19
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c3
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.h6
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c50
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c97
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c115
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c3
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_main.c42
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_rep.c7
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_stats.c8
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_tc.c37
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eswitch.h4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c12
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fs_core.h5
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c5
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/health.c22
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c9
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c7
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/crypto.c1
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum.c17
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum.h4
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_nve.c1
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_nve.h1
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_nve_vxlan.c76
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.c155
-rw-r--r--drivers/net/ethernet/mscc/ocelot.c1
-rw-r--r--drivers/net/ethernet/mscc/ocelot_ace.c2
-rw-r--r--drivers/net/ethernet/mscc/ocelot_flower.c23
-rw-r--r--drivers/net/ethernet/mscc/ocelot_tc.c6
-rw-r--r--drivers/net/ethernet/myricom/myri10ge/myri10ge.c2
-rw-r--r--drivers/net/ethernet/netronome/nfp/bpf/jit.c17
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/offload.c25
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/qos_conf.c2
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c8
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c4
-rw-r--r--drivers/net/ethernet/ni/Kconfig2
-rw-r--r--drivers/net/ethernet/packetengines/Kconfig6
-rw-r--r--drivers/net/ethernet/packetengines/Makefile2
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_int.c2
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_main.c4
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_rdma.c7
-rw-r--r--drivers/net/ethernet/qualcomm/rmnet/rmnet_map_data.c13
-rw-r--r--drivers/net/ethernet/realtek/r8169_main.c19
-rw-r--r--drivers/net/ethernet/renesas/ravb_main.c8
-rw-r--r--drivers/net/ethernet/rocker/rocker_main.c2
-rw-r--r--drivers/net/ethernet/samsung/Kconfig2
-rw-r--r--drivers/net/ethernet/seeq/sgiseeq.c7
-rw-r--r--drivers/net/ethernet/smsc/smc911x.c1
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c6
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c4
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h7
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c87
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_main.c50
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c7
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c4
-rw-r--r--drivers/net/ethernet/ti/cpsw.c2
-rw-r--r--drivers/net/ethernet/toshiba/spider_net.c1
-rw-r--r--drivers/net/ethernet/toshiba/tc35815.c2
-rw-r--r--drivers/net/ethernet/tundra/tsi108_eth.c5
-rw-r--r--drivers/net/ethernet/xscale/Kconfig2
-rw-r--r--drivers/net/hamradio/baycom_epp.c3
-rw-r--r--drivers/net/hyperv/netvsc_drv.c10
-rw-r--r--drivers/net/ieee802154/mac802154_hwsim.c8
-rw-r--r--drivers/net/netdevsim/dev.c63
-rw-r--r--drivers/net/netdevsim/fib.c102
-rw-r--r--drivers/net/netdevsim/netdev.c9
-rw-r--r--drivers/net/netdevsim/netdevsim.h10
-rw-r--r--drivers/net/phy/at803x.c32
-rw-r--r--drivers/net/phy/fixed_phy.c6
-rw-r--r--drivers/net/phy/mscc.c16
-rw-r--r--drivers/net/phy/phy-c45.c40
-rw-r--r--drivers/net/phy/phy.c2
-rw-r--r--drivers/net/phy/phy_device.c18
-rw-r--r--drivers/net/phy/phy_led_triggers.c3
-rw-r--r--drivers/net/phy/phylink.c10
-rw-r--r--drivers/net/phy/sfp.c2
-rw-r--r--drivers/net/ppp/pppoe.c3
-rw-r--r--drivers/net/ppp/pppox.c13
-rw-r--r--drivers/net/ppp/pptp.c3
-rw-r--r--drivers/net/team/team.c2
-rw-r--r--drivers/net/tun.c9
-rw-r--r--drivers/net/usb/cx82310_eth.c3
-rw-r--r--drivers/net/usb/kalmia.c6
-rw-r--r--drivers/net/usb/lan78xx.c8
-rw-r--r--drivers/net/usb/pegasus.c2
-rw-r--r--drivers/net/usb/qmi_wwan.c1
-rw-r--r--drivers/net/usb/r8152.c22
-rw-r--r--drivers/net/vrf.c58
-rw-r--r--drivers/net/wan/sdla.c1
-rw-r--r--drivers/net/wimax/i2400m/fw.c4
-rw-r--r--drivers/net/wireless/ath/wil6210/cfg80211.c4
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/vendor.c1
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/22000.c24
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/rx.h3
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/dbg.c22
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-config.h2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-drv.c4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/fw.c29
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c33
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c68
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mvm.h8
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/nvm.c2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/ops.c4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/rs.c539
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/rs.h3
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c185
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/sta.c6
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/sta.h12
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/tx.c3
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/utils.c4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/drv.c24
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/trans.c6
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c20
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/tx.c2
-rw-r--r--drivers/net/wireless/mac80211_hwsim.c8
-rw-r--r--drivers/net/wireless/marvell/mwifiex/main.h1
-rw-r--r--drivers/net/wireless/marvell/mwifiex/scan.c3
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x0/usb.c8
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2800lib.c9
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2x00.h1
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2x00dev.c13
-rw-r--r--drivers/net/wireless/ti/wlcore/vendor_cmd.c3
-rw-r--r--drivers/net/xen-netback/netback.c2
-rw-r--r--drivers/nfc/nfcmrvl/main.c4
-rw-r--r--drivers/nfc/nfcmrvl/uart.c4
-rw-r--r--drivers/nfc/nfcmrvl/usb.c1
-rw-r--r--drivers/nfc/st-nci/se.c2
-rw-r--r--drivers/nfc/st21nfca/se.c2
-rw-r--r--drivers/ntb/msi.c5
-rw-r--r--drivers/nvdimm/btt_devs.c16
-rw-r--r--drivers/nvdimm/bus.c210
-rw-r--r--drivers/nvdimm/core.c10
-rw-r--r--drivers/nvdimm/dimm_devs.c4
-rw-r--r--drivers/nvdimm/namespace_devs.c36
-rw-r--r--drivers/nvdimm/nd-core.h71
-rw-r--r--drivers/nvdimm/pfn_devs.c24
-rw-r--r--drivers/nvdimm/pmem.c4
-rw-r--r--drivers/nvdimm/region.c24
-rw-r--r--drivers/nvdimm/region_devs.c12
-rw-r--r--drivers/nvme/host/core.c41
-rw-r--r--drivers/nvme/host/multipath.c79
-rw-r--r--drivers/nvme/host/nvme.h32
-rw-r--r--drivers/nvme/host/pci.c23
-rw-r--r--drivers/nvme/host/rdma.c16
-rw-r--r--drivers/nvme/target/configfs.c1
-rw-r--r--drivers/nvme/target/core.c15
-rw-r--r--drivers/nvme/target/loop.c8
-rw-r--r--drivers/nvme/target/nvmet.h3
-rw-r--r--drivers/nvmem/nvmem-sysfs.c15
-rw-r--r--drivers/of/irq.c2
-rw-r--r--drivers/of/resolver.c12
-rw-r--r--drivers/pci/pci.c29
-rw-r--r--drivers/pci/pci.h1
-rw-r--r--drivers/pci/pcie/aspm.c20
-rw-r--r--drivers/pci/pcie/portdrv_core.c66
-rw-r--r--drivers/pci/quirks.c2
-rw-r--r--drivers/pcmcia/db1xxx_ss.c4
-rw-r--r--drivers/perf/arm_pmu.c2
-rw-r--r--drivers/pinctrl/aspeed/pinctrl-aspeed-g4.c2
-rw-r--r--drivers/pinctrl/aspeed/pinctrl-aspeed-g5.c92
-rw-r--r--drivers/pinctrl/aspeed/pinctrl-aspeed.c12
-rw-r--r--drivers/pinctrl/aspeed/pinmux-aspeed.c2
-rw-r--r--drivers/pinctrl/aspeed/pinmux-aspeed.h5
-rw-r--r--drivers/platform/chrome/cros_ec_ishtp.c4
-rw-r--r--drivers/platform/olpc/olpc-xo175-ec.c6
-rw-r--r--drivers/platform/x86/intel_pmc_core.c1
-rw-r--r--drivers/platform/x86/pcengines-apuv2.c6
-rw-r--r--drivers/power/supply/ab8500_charger.c1
-rw-r--r--drivers/powercap/intel_rapl_common.c2
-rw-r--r--drivers/powercap/powercap_sys.c2
-rw-r--r--drivers/pwm/core.c7
-rw-r--r--drivers/regulator/axp20x-regulator.c10
-rw-r--r--drivers/regulator/lp87565-regulator.c8
-rw-r--r--drivers/regulator/of_regulator.c4
-rw-r--r--drivers/s390/block/dasd_alias.c22
-rw-r--r--drivers/s390/char/con3215.c1
-rw-r--r--drivers/s390/char/tape_core.c3
-rw-r--r--drivers/s390/cio/qdio_main.c24
-rw-r--r--drivers/s390/cio/vfio_ccw_async.c2
-rw-r--r--drivers/s390/cio/vfio_ccw_cp.c28
-rw-r--r--drivers/s390/cio/vfio_ccw_drv.c2
-rw-r--r--drivers/s390/crypto/ap_queue.c1
-rw-r--r--drivers/s390/crypto/zcrypt_msgtype6.c17
-rw-r--r--drivers/s390/net/ctcm_fsms.c1
-rw-r--r--drivers/s390/net/ctcm_mpc.c3
-rw-r--r--drivers/s390/net/qeth_core.h1
-rw-r--r--drivers/s390/net/qeth_core_main.c24
-rw-r--r--drivers/s390/net/qeth_l2_main.c2
-rw-r--r--drivers/s390/virtio/virtio_ccw.c4
-rw-r--r--drivers/scsi/Kconfig4
-rw-r--r--drivers/scsi/arm/fas216.c8
-rw-r--r--drivers/scsi/device_handler/scsi_dh_alua.c7
-rw-r--r--drivers/scsi/fcoe/fcoe_ctlr.c140
-rw-r--r--drivers/scsi/hpsa.c18
-rw-r--r--drivers/scsi/ibmvscsi/ibmvfc.c2
-rw-r--r--drivers/scsi/libfc/fc_rport.c5
-rw-r--r--drivers/scsi/libsas/sas_discover.c1
-rw-r--r--drivers/scsi/lpfc/lpfc.h1
-rw-r--r--drivers/scsi/lpfc/lpfc_attr.c15
-rw-r--r--drivers/scsi/lpfc/lpfc_init.c33
-rw-r--r--drivers/scsi/lpfc/lpfc_sli4.h5
-rw-r--r--drivers/scsi/megaraid/megaraid_sas_base.c5
-rw-r--r--drivers/scsi/megaraid/megaraid_sas_fusion.c27
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_base.c12
-rw-r--r--drivers/scsi/qla2xxx/qla_attr.c2
-rw-r--r--drivers/scsi/qla2xxx/qla_init.c2
-rw-r--r--drivers/scsi/qla2xxx/qla_os.c11
-rw-r--r--drivers/scsi/scsi_lib.c6
-rw-r--r--drivers/scsi/ufs/ufshcd.c3
-rw-r--r--drivers/soc/fsl/qe/qe.c2
-rw-r--r--drivers/soc/ixp4xx/Kconfig4
-rw-r--r--drivers/soc/ti/pm33xx.c19
-rw-r--r--drivers/soundwire/Kconfig7
-rw-r--r--drivers/soundwire/Makefile2
-rw-r--r--drivers/soundwire/cadence_master.c8
-rw-r--r--drivers/spi/spi-bcm2835.c3
-rw-r--r--drivers/spi/spi-fsl-qspi.c2
-rw-r--r--drivers/spi/spi-gpio.c6
-rw-r--r--drivers/spi/spi-pxa2xx.c14
-rw-r--r--drivers/staging/android/ion/ion_page_pool.c3
-rw-r--r--drivers/staging/comedi/drivers/dt3000.c8
-rw-r--r--drivers/staging/fbtft/fb_bd663474.c2
-rw-r--r--drivers/staging/fbtft/fb_ili9163.c2
-rw-r--r--drivers/staging/fbtft/fb_ili9325.c2
-rw-r--r--drivers/staging/fbtft/fb_s6d1121.c2
-rw-r--r--drivers/staging/fbtft/fb_ssd1289.c2
-rw-r--r--drivers/staging/fbtft/fb_ssd1331.c4
-rw-r--r--drivers/staging/fbtft/fb_upd161704.c2
-rw-r--r--drivers/staging/fbtft/fbtft-bus.c2
-rw-r--r--drivers/staging/fbtft/fbtft-core.c47
-rw-r--r--drivers/staging/gasket/apex_driver.c2
-rw-r--r--drivers/staging/unisys/visornic/visornic_main.c3
-rw-r--r--drivers/staging/wilc1000/wilc_wfi_cfgoperations.c1
-rw-r--r--drivers/target/iscsi/cxgbit/cxgbit_cm.c8
-rw-r--r--drivers/target/iscsi/cxgbit/cxgbit_main.c3
-rw-r--r--drivers/target/target_core_user.c9
-rw-r--r--drivers/thermal/intel/int340x_thermal/processor_thermal_device.c4
-rw-r--r--drivers/tty/hvc/hvcs.c2
-rw-r--r--drivers/tty/serial/Kconfig19
-rw-r--r--drivers/tty/serial/Makefile1
-rw-r--r--drivers/tty/serial/kgdboc.c4
-rw-r--r--drivers/tty/serial/netx-serial.c733
-rw-r--r--drivers/tty/tty_ldsem.c5
-rw-r--r--drivers/tty/vt/vt.c6
-rw-r--r--drivers/usb/chipidea/ci_hdrc_imx.c19
-rw-r--r--drivers/usb/chipidea/udc.c32
-rw-r--r--drivers/usb/class/cdc-acm.c12
-rw-r--r--drivers/usb/class/cdc-wdm.c16
-rw-r--r--drivers/usb/class/usbtmc.c3
-rw-r--r--drivers/usb/core/buffer.c10
-rw-r--r--drivers/usb/core/devio.c2
-rw-r--r--drivers/usb/core/file.c10
-rw-r--r--drivers/usb/core/hcd-pci.c30
-rw-r--r--drivers/usb/core/hcd.c131
-rw-r--r--drivers/usb/core/message.c4
-rw-r--r--drivers/usb/core/sysfs.c121
-rw-r--r--drivers/usb/core/usb.h5
-rw-r--r--drivers/usb/dwc2/hcd.c2
-rw-r--r--drivers/usb/gadget/composite.c1
-rw-r--r--drivers/usb/gadget/function/f_mass_storage.c28
-rw-r--r--drivers/usb/gadget/udc/lpc32xx_udc.c2
-rw-r--r--drivers/usb/gadget/udc/renesas_usb3.c5
-rw-r--r--drivers/usb/host/ehci-pci.c4
-rw-r--r--drivers/usb/host/fotg210-hcd.c4
-rw-r--r--drivers/usb/host/hwa-hc.c2
-rw-r--r--drivers/usb/host/ohci-hcd.c15
-rw-r--r--drivers/usb/host/ohci-pci.c2
-rw-r--r--drivers/usb/host/pci-quirks.c45
-rw-r--r--drivers/usb/host/pci-quirks.h2
-rw-r--r--drivers/usb/host/xhci-pci.c2
-rw-r--r--drivers/usb/host/xhci-rcar.c11
-rw-r--r--drivers/usb/host/xhci-tegra.c10
-rw-r--r--drivers/usb/host/xhci.c10
-rw-r--r--drivers/usb/host/xhci.h3
-rw-r--r--drivers/usb/misc/iowarrior.c7
-rw-r--r--drivers/usb/misc/rio500.c43
-rw-r--r--drivers/usb/misc/usb251xb.c15
-rw-r--r--drivers/usb/misc/yurex.c2
-rw-r--r--drivers/usb/serial/option.c10
-rw-r--r--drivers/usb/storage/realtek_cr.c15
-rw-r--r--drivers/usb/storage/scsiglue.c11
-rw-r--r--drivers/usb/storage/unusual_devs.h2
-rw-r--r--drivers/usb/typec/tcpm/tcpm.c60
-rw-r--r--drivers/usb/typec/ucsi/ucsi_ccg.c2
-rw-r--r--drivers/vhost/vhost.h2
-rw-r--r--drivers/video/fbdev/acornfb.c1
-rw-r--r--drivers/video/fbdev/omap/omapfb_main.c8
-rw-r--r--drivers/watchdog/ar7_wdt.c1
-rw-r--r--drivers/watchdog/pcwd.c2
-rw-r--r--drivers/watchdog/riowd.c2
-rw-r--r--drivers/watchdog/sb_wdog.c1
-rw-r--r--drivers/watchdog/scx200_wdt.c1
-rw-r--r--drivers/watchdog/wdt.c2
-rw-r--r--drivers/watchdog/wdt285.c2
-rw-r--r--drivers/watchdog/wdt977.c2
-rw-r--r--drivers/xen/gntdev.c2
-rw-r--r--drivers/xen/privcmd.c35
-rw-r--r--drivers/xen/swiotlb-xen.c34
-rw-r--r--drivers/xen/xen-pciback/conf_space_capability.c3
-rw-r--r--drivers/xen/xlate_mmu.c32
-rw-r--r--fs/afs/cell.c4
-rw-r--r--fs/afs/cmservice.c10
-rw-r--r--fs/afs/dir.c92
-rw-r--r--fs/afs/file.c12
-rw-r--r--fs/afs/fsclient.c51
-rw-r--r--fs/afs/vlclient.c11
-rw-r--r--fs/afs/yfsclient.c56
-rw-r--r--fs/block_dev.c86
-rw-r--r--fs/btrfs/Kconfig1
-rw-r--r--fs/btrfs/backref.c2
-rw-r--r--fs/btrfs/ctree.h4
-rw-r--r--fs/btrfs/disk-io.c3
-rw-r--r--fs/btrfs/extent-tree.c71
-rw-r--r--fs/btrfs/inode.c24
-rw-r--r--fs/btrfs/locking.c9
-rw-r--r--fs/btrfs/ordered-data.c11
-rw-r--r--fs/btrfs/send.c77
-rw-r--r--fs/btrfs/transaction.c32
-rw-r--r--fs/btrfs/transaction.h3
-rw-r--r--fs/btrfs/volumes.c23
-rw-r--r--fs/ceph/addr.c5
-rw-r--r--fs/ceph/caps.c5
-rw-r--r--fs/ceph/inode.c7
-rw-r--r--fs/ceph/locks.c3
-rw-r--r--fs/ceph/snap.c4
-rw-r--r--fs/ceph/super.h2
-rw-r--r--fs/ceph/xattr.c19
-rw-r--r--fs/cifs/cifsfs.h2
-rw-r--r--fs/cifs/cifsproto.h1
-rw-r--r--fs/cifs/cifssmb.c197
-rw-r--r--fs/cifs/connect.c30
-rw-r--r--fs/cifs/dir.c5
-rw-r--r--fs/cifs/misc.c22
-rw-r--r--fs/cifs/sess.c26
-rw-r--r--fs/cifs/smb2ops.c39
-rw-r--r--fs/cifs/smb2pdu.c7
-rw-r--r--fs/compat_ioctl.c3
-rw-r--r--fs/coredump.c44
-rw-r--r--fs/dax.c4
-rw-r--r--fs/exec.c2
-rw-r--r--fs/f2fs/file.c63
-rw-r--r--fs/f2fs/gc.c70
-rw-r--r--fs/f2fs/super.c48
-rw-r--r--fs/gfs2/bmap.c179
-rw-r--r--fs/io_uring.c162
-rw-r--r--fs/iomap/Makefile2
-rw-r--r--fs/namespace.c4
-rw-r--r--fs/nfs/delegation.c25
-rw-r--r--fs/nfs/delegation.h2
-rw-r--r--fs/nfs/dir.c2
-rw-r--r--fs/nfs/direct.c27
-rw-r--r--fs/nfs/flexfilelayout/flexfilelayout.c28
-rw-r--r--fs/nfs/fscache.c7
-rw-r--r--fs/nfs/fscache.h2
-rw-r--r--fs/nfs/inode.c33
-rw-r--r--fs/nfs/internal.h10
-rw-r--r--fs/nfs/nfs4_fs.h3
-rw-r--r--fs/nfs/nfs4client.c5
-rw-r--r--fs/nfs/nfs4file.c12
-rw-r--r--fs/nfs/nfs4proc.c109
-rw-r--r--fs/nfs/nfs4state.c49
-rw-r--r--fs/nfs/pagelist.c19
-rw-r--r--fs/nfs/pnfs.c7
-rw-r--r--fs/nfs/pnfs_nfs.c15
-rw-r--r--fs/nfs/proc.c7
-rw-r--r--fs/nfs/read.c35
-rw-r--r--fs/nfs/super.c1
-rw-r--r--fs/nfs/write.c38
-rw-r--r--fs/nfsd/nfscache.c2
-rw-r--r--fs/nfsd/nfsctl.c19
-rw-r--r--fs/ocfs2/xattr.c3
-rw-r--r--fs/open.c19
-rw-r--r--fs/read_write.c49
-rw-r--r--fs/seq_file.c2
-rw-r--r--fs/super.c5
-rw-r--r--fs/ubifs/budget.c2
-rw-r--r--fs/ubifs/orphan.c2
-rw-r--r--fs/ubifs/super.c4
-rw-r--r--fs/userfaultfd.c25
-rw-r--r--fs/xfs/libxfs/xfs_bmap.c29
-rw-r--r--fs/xfs/libxfs/xfs_da_btree.c19
-rw-r--r--fs/xfs/libxfs/xfs_dir2_node.c3
-rw-r--r--fs/xfs/scrub/dabtree.c6
-rw-r--r--fs/xfs/xfs_ioctl32.c56
-rw-r--r--fs/xfs/xfs_iops.c1
-rw-r--r--fs/xfs/xfs_itable.c3
-rw-r--r--fs/xfs/xfs_log.c5
-rw-r--r--fs/xfs/xfs_pnfs.c2
-rw-r--r--fs/xfs/xfs_reflink.c63
-rw-r--r--include/asm-generic/5level-fixup.h21
-rw-r--r--include/asm-generic/futex.h21
-rw-r--r--include/asm-generic/getorder.h50
-rw-r--r--include/drm/drm_client.h2
-rw-r--r--include/drm/drm_mode_config.h7
-rw-r--r--include/kvm/arm_pmu.h2
-rw-r--r--include/kvm/arm_vgic.h1
-rw-r--r--include/linux/blk-cgroup.h1
-rw-r--r--include/linux/ccp.h2
-rw-r--r--include/linux/ceph/buffer.h3
-rw-r--r--include/linux/clk.h1
-rw-r--r--include/linux/connector.h1
-rw-r--r--include/linux/cred.h8
-rw-r--r--include/linux/device.h6
-rw-r--r--include/linux/dim.h56
-rw-r--r--include/linux/dma-contiguous.h5
-rw-r--r--include/linux/dma-mapping.h4
-rw-r--r--include/linux/dma-noncoherent.h13
-rw-r--r--include/linux/elevator.h1
-rw-r--r--include/linux/filter.h13
-rw-r--r--include/linux/fs.h6
-rw-r--r--include/linux/gfp.h12
-rw-r--r--include/linux/gpio.h24
-rw-r--r--include/linux/gpio/consumer.h64
-rw-r--r--include/linux/hmm.h54
-rw-r--r--include/linux/if_pppox.h3
-rw-r--r--include/linux/if_rmnet.h4
-rw-r--r--include/linux/iova.h6
-rw-r--r--include/linux/key.h8
-rw-r--r--include/linux/kvm_host.h6
-rw-r--r--include/linux/logic_pio.h1
-rw-r--r--include/linux/memcontrol.h19
-rw-r--r--include/linux/mempolicy.h2
-rw-r--r--include/linux/mlx5/device.h4
-rw-r--r--include/linux/mlx5/fs.h1
-rw-r--r--include/linux/mlx5/mlx5_ifc.h11
-rw-r--r--include/linux/mm_types.h11
-rw-r--r--include/linux/mmzone.h5
-rw-r--r--include/linux/mod_devicetable.h1
-rw-r--r--include/linux/netfilter/nf_conntrack_h323_asn1.h3
-rw-r--r--include/linux/netfilter/nf_conntrack_h323_types.h5
-rw-r--r--include/linux/of.h2
-rw-r--r--include/linux/page-flags-layout.h18
-rw-r--r--include/linux/page-flags.h4
-rw-r--r--include/linux/pci.h2
-rw-r--r--include/linux/phy.h1
-rw-r--r--include/linux/sched.h10
-rw-r--r--include/linux/sched/numa_balancing.h4
-rw-r--r--include/linux/signal.h15
-rw-r--r--include/linux/skbuff.h8
-rw-r--r--include/linux/skmsg.h8
-rw-r--r--include/linux/socket.h3
-rw-r--r--include/linux/sunrpc/sched.h1
-rw-r--r--include/linux/timekeeper_internal.h5
-rw-r--r--include/linux/trace_events.h1
-rw-r--r--include/linux/usb.h2
-rw-r--r--include/linux/usb/hcd.h3
-rw-r--r--include/linux/wait.h13
-rw-r--r--include/math-emu/op-common.h5
-rw-r--r--include/net/act_api.h4
-rw-r--r--include/net/addrconf.h2
-rw-r--r--include/net/bluetooth/hci_core.h1
-rw-r--r--include/net/cfg80211.h17
-rw-r--r--include/net/flow_offload.h30
-rw-r--r--include/net/inet_frag.h2
-rw-r--r--include/net/net_namespace.h3
-rw-r--r--include/net/netfilter/nf_conntrack_expect.h12
-rw-r--r--include/net/netfilter/nf_conntrack_synproxy.h1
-rw-r--r--include/net/netfilter/nf_tables.h14
-rw-r--r--include/net/netfilter/nf_tables_offload.h2
-rw-r--r--include/net/netlink.h5
-rw-r--r--include/net/nexthop.h6
-rw-r--r--include/net/pkt_cls.h7
-rw-r--r--include/net/psample.h1
-rw-r--r--include/net/route.h2
-rw-r--r--include/net/sch_generic.h8
-rw-r--r--include/net/sock.h10
-rw-r--r--include/net/tc_act/tc_police.h4
-rw-r--r--include/net/tc_act/tc_sample.h2
-rw-r--r--include/net/tcp.h8
-rw-r--r--include/net/tls.h13
-rw-r--r--include/rdma/ib_verbs.h4
-rw-r--r--include/rdma/rdmavt_qp.h9
-rw-r--r--include/rdma/restrack.h3
-rw-r--r--include/scsi/libfc.h52
-rw-r--r--include/scsi/libfcoe.h3
-rw-r--r--include/soc/arc/mcip.h11
-rw-r--r--include/soc/fsl/qe/qe.h2
-rw-r--r--include/sound/compress_driver.h5
-rw-r--r--include/sound/simple_card_utils.h4
-rw-r--r--include/sound/sof/control.h2
-rw-r--r--include/sound/sof/dai-intel.h2
-rw-r--r--include/sound/sof/dai.h2
-rw-r--r--include/sound/sof/header.h2
-rw-r--r--include/sound/sof/info.h2
-rw-r--r--include/sound/sof/pm.h2
-rw-r--r--include/sound/sof/stream.h2
-rw-r--r--include/sound/sof/topology.h2
-rw-r--r--include/sound/sof/trace.h2
-rw-r--r--include/sound/sof/xtensa.h2
-rw-r--r--include/trace/events/dma_fence.h2
-rw-r--r--include/trace/events/napi.h4
-rw-r--r--include/trace/events/qdisc.h4
-rw-r--r--include/trace/events/rxrpc.h65
-rw-r--r--include/trace/events/tegra_apb_dma.h4
-rw-r--r--include/uapi/linux/bpf.h4
-rw-r--r--include/uapi/linux/bpfilter.h2
-rw-r--r--include/uapi/linux/ipmi_bmc.h2
-rw-r--r--include/uapi/linux/isst_if.h2
-rw-r--r--include/uapi/linux/jffs2.h5
-rw-r--r--include/uapi/linux/kfd_ioctl.h20
-rw-r--r--include/uapi/linux/kvm.h4
-rw-r--r--include/uapi/linux/netfilter/nf_synproxy.h2
-rw-r--r--include/uapi/linux/netfilter/xt_connlabel.h6
-rw-r--r--include/uapi/linux/netfilter/xt_nfacct.h5
-rw-r--r--include/uapi/linux/nl80211.h2
-rw-r--r--include/uapi/linux/psp-sev.h2
-rw-r--r--include/uapi/linux/rds.h2
-rw-r--r--include/uapi/linux/rxrpc.h2
-rw-r--r--include/uapi/linux/serial_core.h3
-rw-r--r--include/uapi/linux/socket.h19
-rw-r--r--include/uapi/linux/usb/g_uvc.h2
-rw-r--r--include/uapi/linux/vbox_vmmdev_types.h2
-rw-r--r--include/uapi/linux/vboxguest.h2
-rw-r--r--include/uapi/linux/videodev2.h8
-rw-r--r--include/uapi/linux/virtio_iommu.h32
-rw-r--r--include/uapi/linux/virtio_pmem.h2
-rw-r--r--include/uapi/linux/vmcore.h2
-rw-r--r--include/uapi/linux/wmi.h2
-rw-r--r--include/uapi/misc/fastrpc.h2
-rw-r--r--include/uapi/rdma/rvt-abi.h2
-rw-r--r--include/uapi/rdma/siw-abi.h5
-rw-r--r--include/uapi/scsi/scsi_bsg_ufs.h2
-rw-r--r--include/uapi/sound/skl-tplg-interface.h2
-rw-r--r--include/uapi/sound/sof/fw.h16
-rw-r--r--include/uapi/sound/sof/header.h14
-rw-r--r--include/xen/xen-ops.h3
-rw-r--r--kernel/Kconfig.preempt8
-rw-r--r--kernel/Makefile1
-rw-r--r--kernel/bpf/core.c8
-rw-r--r--kernel/bpf/syscall.c30
-rw-r--r--kernel/bpf/verifier.c13
-rw-r--r--kernel/configs.c16
-rw-r--r--kernel/cred.c21
-rw-r--r--kernel/dma/contiguous.c16
-rw-r--r--kernel/dma/direct.c20
-rw-r--r--kernel/dma/mapping.c32
-rw-r--r--kernel/dma/remap.c2
-rw-r--r--kernel/events/core.c2
-rw-r--r--kernel/exit.c6
-rw-r--r--kernel/fork.c2
-rw-r--r--kernel/irq/affinity.c6
-rw-r--r--kernel/irq/irqdesc.c15
-rw-r--r--kernel/kallsyms.c6
-rw-r--r--kernel/kprobes.c8
-rw-r--r--kernel/locking/lockdep.c13
-rw-r--r--kernel/locking/lockdep_proc.c3
-rw-r--r--kernel/locking/mutex.c11
-rw-r--r--kernel/locking/rwsem.c28
-rw-r--r--kernel/module.c4
-rw-r--r--kernel/sched/core.c5
-rw-r--r--kernel/sched/cpufreq_schedutil.c14
-rw-r--r--kernel/sched/deadline.c8
-rw-r--r--kernel/sched/fair.c144
-rw-r--r--kernel/sched/psi.c12
-rw-r--r--kernel/signal.c8
-rw-r--r--kernel/time/timekeeping.c5
-rw-r--r--kernel/time/vsyscall.c22
-rw-r--r--kernel/trace/ftrace.c17
-rw-r--r--kernel/trace/trace.c26
-rw-r--r--kernel/trace/trace_events.c2
-rw-r--r--kernel/trace/trace_functions_graph.c17
-rw-r--r--kernel/trace/trace_probe.c3
-rw-r--r--lib/Kconfig.kasan11
-rw-r--r--lib/Makefile3
-rw-r--r--lib/dim/dim.c4
-rw-r--r--lib/dim/net_dim.c56
-rw-r--r--lib/kfifo.c3
-rw-r--r--lib/logic_pio.c73
-rw-r--r--lib/raid6/Makefile2
-rw-r--r--lib/test_firmware.c5
-rw-r--r--lib/test_meminit.c2
-rw-r--r--lib/vdso/gettimeofday.c79
-rw-r--r--mm/Makefile1
-rw-r--r--mm/balloon_compaction.c69
-rw-r--r--mm/compaction.c11
-rw-r--r--mm/hmm.c10
-rw-r--r--mm/huge_memory.c55
-rw-r--r--mm/hugetlb.c19
-rw-r--r--mm/kasan/common.c10
-rw-r--r--mm/kmemleak.c4
-rw-r--r--mm/memcontrol.c146
-rw-r--r--mm/memory_hotplug.c2
-rw-r--r--mm/mempolicy.c134
-rw-r--r--mm/memremap.c (renamed from kernel/memremap.c)30
-rw-r--r--mm/migrate.c21
-rw-r--r--mm/page_alloc.c19
-rw-r--r--mm/rmap.c8
-rw-r--r--mm/shmem.c2
-rw-r--r--mm/slub.c8
-rw-r--r--mm/usercopy.c2
-rw-r--r--mm/vmalloc.c21
-rw-r--r--mm/vmscan.c27
-rw-r--r--mm/workingset.c10
-rw-r--r--mm/z3fold.c104
-rw-r--r--mm/zsmalloc.c80
-rw-r--r--net/batman-adv/bat_iv_ogm.c20
-rw-r--r--net/batman-adv/bat_v_ogm.c18
-rw-r--r--net/batman-adv/multicast.c8
-rw-r--r--net/batman-adv/netlink.c2
-rw-r--r--net/bluetooth/hci_core.c1
-rw-r--r--net/bluetooth/hci_debugfs.c31
-rw-r--r--net/bluetooth/hidp/core.c9
-rw-r--r--net/bluetooth/l2cap_core.c2
-rw-r--r--net/bridge/br.c5
-rw-r--r--net/bridge/br_multicast.c3
-rw-r--r--net/bridge/br_private.h9
-rw-r--r--net/bridge/br_vlan.c29
-rw-r--r--net/bridge/netfilter/Kconfig6
-rw-r--r--net/bridge/netfilter/ebtables.c40
-rw-r--r--net/bridge/netfilter/nft_meta_bridge.c12
-rw-r--r--net/can/gw.c48
-rw-r--r--net/ceph/crypto.c6
-rw-r--r--net/ceph/osd_client.c9
-rw-r--r--net/core/dev.c17
-rw-r--r--net/core/filter.c14
-rw-r--r--net/core/flow_dissector.c2
-rw-r--r--net/core/flow_offload.c22
-rw-r--r--net/core/netpoll.c6
-rw-r--r--net/core/skmsg.c4
-rw-r--r--net/core/sock.c50
-rw-r--r--net/core/sock_diag.c3
-rw-r--r--net/core/sock_map.c19
-rw-r--r--net/core/stream.c16
-rw-r--r--net/dsa/slave.c6
-rw-r--r--net/dsa/switch.c3
-rw-r--r--net/dsa/tag_8021q.c2
-rw-r--r--net/dsa/tag_sja1105.c12
-rw-r--r--net/ieee802154/6lowpan/reassembly.c2
-rw-r--r--net/ieee802154/socket.c2
-rw-r--r--net/ipv4/fib_trie.c2
-rw-r--r--net/ipv4/icmp.c10
-rw-r--r--net/ipv4/igmp.c4
-rw-r--r--net/ipv4/inet_fragment.c41
-rw-r--r--net/ipv4/ip_fragment.c8
-rw-r--r--net/ipv4/ipip.c3
-rw-r--r--net/ipv4/netfilter/ipt_CLUSTERIP.c4
-rw-r--r--net/ipv4/netfilter/ipt_SYNPROXY.c2
-rw-r--r--net/ipv4/netfilter/ipt_rpfilter.c1
-rw-r--r--net/ipv4/netfilter/nf_nat_h323.c12
-rw-r--r--net/ipv4/route.c17
-rw-r--r--net/ipv4/tcp.c33
-rw-r--r--net/ipv4/tcp_bpf.c6
-rw-r--r--net/ipv4/tcp_output.c19
-rw-r--r--net/ipv4/tcp_ulp.c13
-rw-r--r--net/ipv6/addrconf.c19
-rw-r--r--net/ipv6/ip6_gre.c3
-rw-r--r--net/ipv6/ip6_tunnel.c6
-rw-r--r--net/ipv6/mcast.c5
-rw-r--r--net/ipv6/netfilter/ip6t_SYNPROXY.c2
-rw-r--r--net/ipv6/netfilter/ip6t_rpfilter.c8
-rw-r--r--net/ipv6/netfilter/nf_conntrack_reasm.c2
-rw-r--r--net/ipv6/reassembly.c2
-rw-r--r--net/ipv6/route.c2
-rw-r--r--net/iucv/af_iucv.c14
-rw-r--r--net/l2tp/l2tp_ppp.c3
-rw-r--r--net/mac80211/cfg.c17
-rw-r--r--net/mac80211/driver-ops.c13
-rw-r--r--net/mac80211/iface.c1
-rw-r--r--net/mac80211/mlme.c10
-rw-r--r--net/mac80211/rx.c6
-rw-r--r--net/mac80211/util.c7
-rw-r--r--net/mpls/mpls_iptunnel.c8
-rw-r--r--net/ncsi/ncsi-cmd.c13
-rw-r--r--net/ncsi/ncsi-rsp.c9
-rw-r--r--net/netfilter/Kconfig6
-rw-r--r--net/netfilter/ipset/ip_set_bitmap_ipmac.c2
-rw-r--r--net/netfilter/ipset/ip_set_core.c2
-rw-r--r--net/netfilter/ipset/ip_set_hash_ipmac.c6
-rw-r--r--net/netfilter/ipvs/ip_vs_nfct.c2
-rw-r--r--net/netfilter/nf_conntrack_amanda.c2
-rw-r--r--net/netfilter/nf_conntrack_broadcast.c2
-rw-r--r--net/netfilter/nf_conntrack_core.c20
-rw-r--r--net/netfilter/nf_conntrack_expect.c26
-rw-r--r--net/netfilter/nf_conntrack_ftp.c4
-rw-r--r--net/netfilter/nf_conntrack_h323_asn1.c5
-rw-r--r--net/netfilter/nf_conntrack_h323_main.c18
-rw-r--r--net/netfilter/nf_conntrack_irc.c2
-rw-r--r--net/netfilter/nf_conntrack_netlink.c4
-rw-r--r--net/netfilter/nf_conntrack_pptp.c4
-rw-r--r--net/netfilter/nf_conntrack_proto_gre.c2
-rw-r--r--net/netfilter/nf_conntrack_proto_icmp.c2
-rw-r--r--net/netfilter/nf_conntrack_proto_tcp.c8
-rw-r--r--net/netfilter/nf_conntrack_sane.c2
-rw-r--r--net/netfilter/nf_conntrack_sip.c10
-rw-r--r--net/netfilter/nf_conntrack_standalone.c5
-rw-r--r--net/netfilter/nf_conntrack_tftp.c2
-rw-r--r--net/netfilter/nf_flow_table_core.c43
-rw-r--r--net/netfilter/nf_flow_table_ip.c44
-rw-r--r--net/netfilter/nf_nat_amanda.c2
-rw-r--r--net/netfilter/nf_nat_core.c2
-rw-r--r--net/netfilter/nf_nat_ftp.c2
-rw-r--r--net/netfilter/nf_nat_irc.c2
-rw-r--r--net/netfilter/nf_nat_sip.c8
-rw-r--r--net/netfilter/nf_nat_tftp.c2
-rw-r--r--net/netfilter/nf_synproxy_core.c8
-rw-r--r--net/netfilter/nf_tables_api.c23
-rw-r--r--net/netfilter/nf_tables_offload.c22
-rw-r--r--net/netfilter/nfnetlink.c2
-rw-r--r--net/netfilter/nft_chain_filter.c2
-rw-r--r--net/netfilter/nft_chain_nat.c3
-rw-r--r--net/netfilter/nft_ct.c2
-rw-r--r--net/netfilter/nft_flow_offload.c15
-rw-r--r--net/netfilter/nft_hash.c2
-rw-r--r--net/netfilter/nft_meta.c18
-rw-r--r--net/netfilter/nft_redir.c2
-rw-r--r--net/netfilter/nft_synproxy.c2
-rw-r--r--net/netfilter/xt_nfacct.c36
-rw-r--r--net/netfilter/xt_physdev.c6
-rw-r--r--net/netrom/af_netrom.c1
-rw-r--r--net/openvswitch/conntrack.c20
-rw-r--r--net/openvswitch/datapath.c15
-rw-r--r--net/openvswitch/flow.c168
-rw-r--r--net/openvswitch/flow.h5
-rw-r--r--net/openvswitch/flow_table.c8
-rw-r--r--net/packet/af_packet.c7
-rw-r--r--net/psample/psample.c2
-rw-r--r--net/rds/ib.c16
-rw-r--r--net/rds/ib.h1
-rw-r--r--net/rds/ib_cm.c3
-rw-r--r--net/rds/rdma_transport.c15
-rw-r--r--net/rds/recv.c5
-rw-r--r--net/rxrpc/af_rxrpc.c9
-rw-r--r--net/rxrpc/ar-internal.h26
-rw-r--r--net/rxrpc/call_event.c23
-rw-r--r--net/rxrpc/call_object.c33
-rw-r--r--net/rxrpc/conn_client.c44
-rw-r--r--net/rxrpc/conn_event.c6
-rw-r--r--net/rxrpc/conn_object.c2
-rw-r--r--net/rxrpc/input.c359
-rw-r--r--net/rxrpc/local_event.c4
-rw-r--r--net/rxrpc/local_object.c104
-rw-r--r--net/rxrpc/output.c9
-rw-r--r--net/rxrpc/peer_event.c12
-rw-r--r--net/rxrpc/peer_object.c18
-rw-r--r--net/rxrpc/protocol.h9
-rw-r--r--net/rxrpc/recvmsg.c53
-rw-r--r--net/rxrpc/rxkad.c32
-rw-r--r--net/rxrpc/sendmsg.c14
-rw-r--r--net/rxrpc/skbuff.c40
-rw-r--r--net/sched/act_bpf.c11
-rw-r--r--net/sched/act_connmark.c11
-rw-r--r--net/sched/act_csum.c11
-rw-r--r--net/sched/act_ct.c11
-rw-r--r--net/sched/act_ctinfo.c11
-rw-r--r--net/sched/act_gact.c10
-rw-r--r--net/sched/act_ife.c15
-rw-r--r--net/sched/act_ipt.c11
-rw-r--r--net/sched/act_mirred.c15
-rw-r--r--net/sched/act_mpls.c10
-rw-r--r--net/sched/act_nat.c11
-rw-r--r--net/sched/act_pedit.c12
-rw-r--r--net/sched/act_police.c10
-rw-r--r--net/sched/act_sample.c18
-rw-r--r--net/sched/act_simple.c12
-rw-r--r--net/sched/act_skbedit.c25
-rw-r--r--net/sched/act_skbmod.c13
-rw-r--r--net/sched/act_tunnel_key.c10
-rw-r--r--net/sched/act_vlan.c27
-rw-r--r--net/sched/cls_api.c16
-rw-r--r--net/sched/cls_bpf.c2
-rw-r--r--net/sched/cls_flower.c2
-rw-r--r--net/sched/cls_matchall.c2
-rw-r--r--net/sched/cls_u32.c6
-rw-r--r--net/sched/sch_cbs.c19
-rw-r--r--net/sched/sch_codel.c6
-rw-r--r--net/sched/sch_generic.c19
-rw-r--r--net/sched/sch_taprio.c34
-rw-r--r--net/sctp/sm_sideeffect.c2
-rw-r--r--net/sctp/socket.c4
-rw-r--r--net/sctp/stream.c1
-rw-r--r--net/smc/af_smc.c15
-rw-r--r--net/smc/smc_tx.c6
-rw-r--r--net/sunrpc/clnt.c47
-rw-r--r--net/sunrpc/xprt.c7
-rw-r--r--net/tipc/addr.c1
-rw-r--r--net/tipc/link.c92
-rw-r--r--net/tipc/msg.h8
-rw-r--r--net/tipc/netlink_compat.c11
-rw-r--r--net/tipc/socket.c3
-rw-r--r--net/tipc/topsrv.c2
-rw-r--r--net/tls/tls_device.c9
-rw-r--r--net/tls/tls_main.c99
-rw-r--r--net/tls/tls_sw.c83
-rw-r--r--net/vmw_vsock/hyperv_transport.c8
-rw-r--r--net/wireless/core.c6
-rw-r--r--net/wireless/nl80211.c4
-rw-r--r--net/wireless/reg.c2
-rw-r--r--net/wireless/util.c50
-rw-r--r--net/xdp/xdp_umem.c4
-rw-r--r--net/xfrm/xfrm_policy.c4
-rw-r--r--samples/auxdisplay/cfag12864b-example.c2
-rw-r--r--samples/vfio-mdev/mdpy-defs.h2
-rw-r--r--scripts/Kbuild.include3
-rw-r--r--scripts/Kconfig.include2
-rw-r--r--scripts/Makefile.build11
-rw-r--r--scripts/Makefile.lib2
-rw-r--r--scripts/Makefile.modpost86
-rw-r--r--scripts/coccinelle/api/atomic_as_refcounter.cocci1
-rwxr-xr-xscripts/gen_compile_commands.py4
-rwxr-xr-xscripts/headers_install.sh6
-rw-r--r--scripts/kconfig/confdata.c4
-rwxr-xr-xscripts/link-vmlinux.sh2
-rwxr-xr-xscripts/sphinx-pre-install118
-rw-r--r--security/Kconfig.hardening7
-rw-r--r--security/keys/request_key.c2
-rw-r--r--security/keys/trusted.c13
-rw-r--r--security/selinux/ss/policydb.c6
-rw-r--r--security/selinux/ss/sidtab.c5
-rw-r--r--sound/ac97/bus.c13
-rw-r--r--sound/core/compress_offload.c60
-rw-r--r--sound/core/pcm_native.c12
-rw-r--r--sound/core/seq/seq_clientmgr.c3
-rw-r--r--sound/core/seq/seq_fifo.c17
-rw-r--r--sound/core/seq/seq_fifo.h2
-rw-r--r--sound/firewire/oxfw/oxfw-pcm.c2
-rw-r--r--sound/firewire/packets-buffer.c2
-rw-r--r--sound/hda/hdac_i915.c10
-rw-r--r--sound/pci/hda/hda_codec.c2
-rw-r--r--sound/pci/hda/hda_controller.c13
-rw-r--r--sound/pci/hda/hda_controller.h2
-rw-r--r--sound/pci/hda/hda_generic.c21
-rw-r--r--sound/pci/hda/hda_generic.h1
-rw-r--r--sound/pci/hda/hda_intel.c71
-rw-r--r--sound/pci/hda/patch_ca0132.c1
-rw-r--r--sound/pci/hda/patch_conexant.c33
-rw-r--r--sound/pci/hda/patch_realtek.c12
-rw-r--r--sound/soc/amd/raven/acp3x-pcm-dma.c20
-rw-r--r--sound/soc/codecs/cs42xx8.c116
-rw-r--r--sound/soc/codecs/max98357a.c25
-rw-r--r--sound/soc/codecs/max98373.c6
-rw-r--r--sound/soc/codecs/max98373.h2
-rw-r--r--sound/soc/codecs/pcm3060-i2c.c4
-rw-r--r--sound/soc/codecs/pcm3060-spi.c4
-rw-r--r--sound/soc/codecs/pcm3060.c4
-rw-r--r--sound/soc/codecs/pcm3060.h2
-rw-r--r--sound/soc/codecs/rt1011.c4
-rw-r--r--[-rwxr-xr-x]sound/soc/codecs/rt1308.c0
-rw-r--r--[-rwxr-xr-x]sound/soc/codecs/rt1308.h0
-rw-r--r--sound/soc/generic/audio-graph-card.c30
-rw-r--r--sound/soc/generic/simple-card-utils.c7
-rw-r--r--sound/soc/generic/simple-card.c26
-rw-r--r--sound/soc/intel/boards/bytcht_es8316.c8
-rw-r--r--sound/soc/intel/common/soc-acpi-intel-bxt-match.c2
-rw-r--r--sound/soc/intel/common/soc-acpi-intel-byt-match.c2
-rw-r--r--sound/soc/intel/common/soc-acpi-intel-cht-match.c2
-rw-r--r--sound/soc/intel/common/soc-acpi-intel-cnl-match.c2
-rw-r--r--sound/soc/intel/common/soc-acpi-intel-glk-match.c2
-rw-r--r--sound/soc/intel/common/soc-acpi-intel-hda-match.c2
-rw-r--r--sound/soc/intel/common/soc-acpi-intel-hsw-bdw-match.c2
-rw-r--r--sound/soc/intel/common/soc-acpi-intel-icl-match.c2
-rw-r--r--sound/soc/intel/common/soc-acpi-intel-kbl-match.c2
-rw-r--r--sound/soc/intel/common/soc-acpi-intel-skl-match.c2
-rw-r--r--sound/soc/qcom/apq8016_sbc.c16
-rw-r--r--sound/soc/rockchip/rockchip_i2s.c5
-rw-r--r--sound/soc/rockchip/rockchip_max98090.c32
-rw-r--r--sound/soc/samsung/odroid.c8
-rw-r--r--sound/soc/soc-core.c7
-rw-r--r--sound/soc/soc-dapm.c10
-rw-r--r--sound/soc/sof/intel/cnl.c4
-rw-r--r--sound/soc/sof/intel/hda-ipc.c4
-rw-r--r--sound/soc/sunxi/sun4i-i2s.c4
-rw-r--r--sound/soc/ti/davinci-mcasp.c46
-rw-r--r--sound/sound_core.c3
-rw-r--r--sound/usb/helper.c2
-rw-r--r--sound/usb/hiface/pcm.c11
-rw-r--r--sound/usb/line6/pcm.c18
-rw-r--r--sound/usb/line6/podhd.c2
-rw-r--r--sound/usb/line6/variax.c2
-rw-r--r--sound/usb/mixer.c73
-rw-r--r--sound/usb/mixer_quirks.c8
-rw-r--r--sound/usb/pcm.c1
-rw-r--r--sound/usb/stream.c1
-rw-r--r--tools/arch/arm/include/uapi/asm/kvm.h12
-rw-r--r--tools/arch/arm64/include/uapi/asm/kvm.h10
-rw-r--r--tools/arch/powerpc/include/uapi/asm/mman.h4
-rw-r--r--tools/arch/sparc/include/uapi/asm/mman.h4
-rw-r--r--tools/arch/x86/include/uapi/asm/kvm.h22
-rw-r--r--tools/arch/x86/include/uapi/asm/vmx.h1
-rw-r--r--tools/bpf/bpftool/common.c8
-rw-r--r--tools/bpf/bpftool/prog.c4
-rwxr-xr-xtools/hv/hv_get_dhcp_info.sh2
-rw-r--r--tools/hv/hv_kvp_daemon.c10
-rwxr-xr-xtools/hv/hv_set_ifconfig.sh2
-rw-r--r--tools/hv/hv_vss_daemon.c4
-rw-r--r--tools/hv/lsvmbus75
-rw-r--r--tools/include/uapi/asm-generic/mman-common.h15
-rw-r--r--tools/include/uapi/asm-generic/mman.h10
-rw-r--r--tools/include/uapi/asm-generic/unistd.h8
-rw-r--r--tools/include/uapi/drm/drm.h1
-rw-r--r--tools/include/uapi/drm/i915_drm.h209
-rw-r--r--tools/include/uapi/linux/bpf.h11
-rw-r--r--tools/include/uapi/linux/if_link.h5
-rw-r--r--tools/include/uapi/linux/kvm.h7
-rw-r--r--tools/include/uapi/linux/sched.h30
-rw-r--r--tools/include/uapi/linux/usbdevice_fs.h26
-rw-r--r--tools/lib/bpf/btf.c5
-rw-r--r--tools/lib/bpf/hashmap.h5
-rw-r--r--tools/lib/bpf/libbpf.c67
-rw-r--r--tools/lib/bpf/xsk.c11
-rw-r--r--tools/objtool/check.c7
-rw-r--r--tools/objtool/check.h3
-rw-r--r--tools/perf/Documentation/Makefile2
-rw-r--r--tools/perf/Documentation/perf-script.txt8
-rw-r--r--tools/perf/Documentation/perf.data-file-format.txt2
-rw-r--r--tools/perf/arch/s390/util/machine.c31
-rw-r--r--tools/perf/arch/x86/entry/syscalls/syscall_64.tbl2
-rw-r--r--tools/perf/bench/numa.c6
-rw-r--r--tools/perf/builtin-ftrace.c2
-rw-r--r--tools/perf/builtin-probe.c10
-rw-r--r--tools/perf/builtin-script.c2
-rw-r--r--tools/perf/builtin-stat.c9
-rw-r--r--tools/perf/pmu-events/jevents.c1
-rwxr-xr-xtools/perf/trace/beauty/usbdevfs_ioctl.sh9
-rw-r--r--tools/perf/ui/browser.c9
-rw-r--r--tools/perf/ui/tui/progress.c2
-rw-r--r--tools/perf/util/annotate.c2
-rw-r--r--tools/perf/util/cpumap.c5
-rw-r--r--tools/perf/util/evsel.c2
-rw-r--r--tools/perf/util/header.c11
-rw-r--r--tools/perf/util/machine.c3
-rw-r--r--tools/perf/util/machine.h2
-rw-r--r--tools/perf/util/probe-event.c1
-rw-r--r--tools/perf/util/session.c22
-rw-r--r--tools/perf/util/session.h1
-rw-r--r--tools/perf/util/stat-shadow.c3
-rw-r--r--tools/perf/util/symbol.c7
-rw-r--r--tools/perf/util/symbol.h1
-rw-r--r--tools/perf/util/thread.c12
-rw-r--r--tools/perf/util/zstd.c4
-rw-r--r--tools/power/x86/turbostat/Makefile3
-rw-r--r--tools/power/x86/turbostat/turbostat.c101
-rw-r--r--tools/power/x86/x86_energy_perf_policy/Makefile3
-rw-r--r--tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.82
-rw-r--r--tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c28
-rw-r--r--tools/scripts/Makefile.include9
-rwxr-xr-xtools/testing/ktest/config-bisect.pl4
-rw-r--r--tools/testing/selftests/bpf/Makefile9
-rw-r--r--tools/testing/selftests/bpf/config1
-rw-r--r--tools/testing/selftests/bpf/progs/sendmsg6_prog.c3
-rw-r--r--tools/testing/selftests/bpf/test_btf_dump.c7
-rw-r--r--tools/testing/selftests/bpf/test_cgroup_storage.c6
-rw-r--r--tools/testing/selftests/bpf/test_sock.c7
-rwxr-xr-xtools/testing/selftests/bpf/test_xdp_vlan.sh57
-rwxr-xr-xtools/testing/selftests/bpf/test_xdp_vlan_mode_generic.sh9
-rwxr-xr-xtools/testing/selftests/bpf/test_xdp_vlan_mode_native.sh9
-rw-r--r--tools/testing/selftests/bpf/verifier/ctx_skb.c11
-rw-r--r--tools/testing/selftests/bpf/verifier/loops1.c28
-rw-r--r--tools/testing/selftests/cgroup/cgroup_util.c3
-rwxr-xr-xtools/testing/selftests/drivers/net/mlxsw/qos_mc_aware.sh4
-rwxr-xr-xtools/testing/selftests/kmod/kmod.sh6
-rw-r--r--tools/testing/selftests/kselftest.h15
-rw-r--r--tools/testing/selftests/kvm/.gitignore3
-rw-r--r--tools/testing/selftests/kvm/config3
-rw-r--r--tools/testing/selftests/kvm/include/evmcs.h2
-rw-r--r--tools/testing/selftests/kvm/lib/x86_64/processor.c16
-rw-r--r--tools/testing/selftests/kvm/lib/x86_64/vmx.c20
-rw-r--r--tools/testing/selftests/kvm/x86_64/evmcs_test.c15
-rw-r--r--tools/testing/selftests/kvm/x86_64/hyperv_cpuid.c12
-rw-r--r--tools/testing/selftests/kvm/x86_64/platform_info_test.c2
-rw-r--r--tools/testing/selftests/kvm/x86_64/vmx_set_nested_state_test.c32
-rw-r--r--tools/testing/selftests/livepatch/functions.sh46
-rw-r--r--tools/testing/selftests/net/.gitignore4
-rwxr-xr-xtools/testing/selftests/net/forwarding/gre_multipath.sh28
-rwxr-xr-xtools/testing/selftests/net/tcp_fastopen_backup_key.sh2
-rw-r--r--tools/testing/selftests/net/tls.c223
-rwxr-xr-xtools/testing/selftests/netfilter/nft_flowtable.sh48
-rw-r--r--tools/testing/selftests/pidfd/pidfd_test.c6
-rw-r--r--tools/testing/selftests/tc-testing/plugin-lib/nsPlugin.py22
-rw-r--r--tools/testing/selftests/tc-testing/tc-tests/actions/skbedit.json47
-rw-r--r--tools/testing/selftests/tc-testing/tc-tests/actions/vlan.json94
-rw-r--r--tools/testing/selftests/x86/test_vsyscall.c2
-rw-r--r--usr/include/Makefile4
-rw-r--r--virt/kvm/arm/arm.c20
-rw-r--r--virt/kvm/arm/hyp/vgic-v3-sr.c8
-rw-r--r--virt/kvm/arm/mmio.c7
-rw-r--r--virt/kvm/arm/pmu.c18
-rw-r--r--virt/kvm/arm/vgic/vgic-init.c30
-rw-r--r--virt/kvm/arm/vgic/vgic-mmio-v3.c2
-rw-r--r--virt/kvm/arm/vgic/vgic-mmio.c34
-rw-r--r--virt/kvm/arm/vgic/vgic-v2.c14
-rw-r--r--virt/kvm/arm/vgic/vgic-v3.c12
-rw-r--r--virt/kvm/arm/vgic/vgic.c18
-rw-r--r--virt/kvm/arm/vgic/vgic.h6
-rw-r--r--virt/kvm/kvm_main.c61
1923 files changed, 19269 insertions, 13040 deletions
diff --git a/.gitignore b/.gitignore
index 8f5422cba6e2..2030c7a4d2f8 100644
--- a/.gitignore
+++ b/.gitignore
@@ -142,3 +142,6 @@ x509.genkey
142 142
143# Kdevelop4 143# Kdevelop4
144*.kdev4 144*.kdev4
145
146# Clang's compilation database file
147/compile_commands.json
diff --git a/.mailmap b/.mailmap
index 0fef932de3db..afaad605284a 100644
--- a/.mailmap
+++ b/.mailmap
@@ -64,6 +64,9 @@ Dengcheng Zhu <dzhu@wavecomp.com> <dengcheng.zhu@imgtec.com>
64Dengcheng Zhu <dzhu@wavecomp.com> <dczhu@mips.com> 64Dengcheng Zhu <dzhu@wavecomp.com> <dczhu@mips.com>
65Dengcheng Zhu <dzhu@wavecomp.com> <dengcheng.zhu@gmail.com> 65Dengcheng Zhu <dzhu@wavecomp.com> <dengcheng.zhu@gmail.com>
66Dmitry Eremin-Solenikov <dbaryshkov@gmail.com> 66Dmitry Eremin-Solenikov <dbaryshkov@gmail.com>
67Dmitry Safonov <0x7f454c46@gmail.com> <dsafonov@virtuozzo.com>
68Dmitry Safonov <0x7f454c46@gmail.com> <d.safonov@partner.samsung.com>
69Dmitry Safonov <0x7f454c46@gmail.com> <dima@arista.com>
67Domen Puncer <domen@coderock.org> 70Domen Puncer <domen@coderock.org>
68Douglas Gilbert <dougg@torque.net> 71Douglas Gilbert <dougg@torque.net>
69Ed L. Cashin <ecashin@coraid.com> 72Ed L. Cashin <ecashin@coraid.com>
@@ -98,6 +101,7 @@ Jason Gunthorpe <jgg@ziepe.ca> <jgunthorpe@obsidianresearch.com>
98Javi Merino <javi.merino@kernel.org> <javi.merino@arm.com> 101Javi Merino <javi.merino@kernel.org> <javi.merino@arm.com>
99<javier@osg.samsung.com> <javier.martinez@collabora.co.uk> 102<javier@osg.samsung.com> <javier.martinez@collabora.co.uk>
100Jean Tourrilhes <jt@hpl.hp.com> 103Jean Tourrilhes <jt@hpl.hp.com>
104<jean-philippe@linaro.org> <jean-philippe.brucker@arm.com>
101Jeff Garzik <jgarzik@pretzel.yyz.us> 105Jeff Garzik <jgarzik@pretzel.yyz.us>
102Jeff Layton <jlayton@kernel.org> <jlayton@redhat.com> 106Jeff Layton <jlayton@kernel.org> <jlayton@redhat.com>
103Jeff Layton <jlayton@kernel.org> <jlayton@poochiereds.net> 107Jeff Layton <jlayton@kernel.org> <jlayton@poochiereds.net>
@@ -116,6 +120,7 @@ John Stultz <johnstul@us.ibm.com>
116Juha Yrjola <at solidboot.com> 120Juha Yrjola <at solidboot.com>
117Juha Yrjola <juha.yrjola@nokia.com> 121Juha Yrjola <juha.yrjola@nokia.com>
118Juha Yrjola <juha.yrjola@solidboot.com> 122Juha Yrjola <juha.yrjola@solidboot.com>
123Julien Thierry <julien.thierry.kdev@gmail.com> <julien.thierry@arm.com>
119Kay Sievers <kay.sievers@vrfy.org> 124Kay Sievers <kay.sievers@vrfy.org>
120Kenneth W Chen <kenneth.w.chen@intel.com> 125Kenneth W Chen <kenneth.w.chen@intel.com>
121Konstantin Khlebnikov <koct9i@gmail.com> <k.khlebnikov@samsung.com> 126Konstantin Khlebnikov <koct9i@gmail.com> <k.khlebnikov@samsung.com>
@@ -132,6 +137,7 @@ Linus Lüssing <linus.luessing@c0d3.blue> <linus.luessing@ascom.ch>
132Li Yang <leoyang.li@nxp.com> <leo@zh-kernel.org> 137Li Yang <leoyang.li@nxp.com> <leo@zh-kernel.org>
133Li Yang <leoyang.li@nxp.com> <leoli@freescale.com> 138Li Yang <leoyang.li@nxp.com> <leoli@freescale.com>
134Maciej W. Rozycki <macro@mips.com> <macro@imgtec.com> 139Maciej W. Rozycki <macro@mips.com> <macro@imgtec.com>
140Marc Zyngier <maz@kernel.org> <marc.zyngier@arm.com>
135Marcin Nowakowski <marcin.nowakowski@mips.com> <marcin.nowakowski@imgtec.com> 141Marcin Nowakowski <marcin.nowakowski@mips.com> <marcin.nowakowski@imgtec.com>
136Mark Brown <broonie@sirena.org.uk> 142Mark Brown <broonie@sirena.org.uk>
137Mark Yao <markyao0591@gmail.com> <mark.yao@rock-chips.com> 143Mark Yao <markyao0591@gmail.com> <mark.yao@rock-chips.com>
@@ -157,6 +163,8 @@ Matt Ranostay <mranostay@gmail.com> Matthew Ranostay <mranostay@embeddedalley.co
157Matt Ranostay <mranostay@gmail.com> <matt.ranostay@intel.com> 163Matt Ranostay <mranostay@gmail.com> <matt.ranostay@intel.com>
158Matt Ranostay <matt.ranostay@konsulko.com> <matt@ranostay.consulting> 164Matt Ranostay <matt.ranostay@konsulko.com> <matt@ranostay.consulting>
159Matt Redfearn <matt.redfearn@mips.com> <matt.redfearn@imgtec.com> 165Matt Redfearn <matt.redfearn@mips.com> <matt.redfearn@imgtec.com>
166Maxime Ripard <mripard@kernel.org> <maxime.ripard@bootlin.com>
167Maxime Ripard <mripard@kernel.org> <maxime.ripard@free-electrons.com>
160Mayuresh Janorkar <mayur@ti.com> 168Mayuresh Janorkar <mayur@ti.com>
161Michael Buesch <m@bues.ch> 169Michael Buesch <m@bues.ch>
162Michel Dänzer <michel@tungstengraphics.com> 170Michel Dänzer <michel@tungstengraphics.com>
diff --git a/Documentation/PCI/index.rst b/Documentation/PCI/index.rst
index f4c6121868c3..6768305e4c26 100644
--- a/Documentation/PCI/index.rst
+++ b/Documentation/PCI/index.rst
@@ -9,7 +9,7 @@ Linux PCI Bus Subsystem
9 :numbered: 9 :numbered:
10 10
11 pci 11 pci
12 picebus-howto 12 pciebus-howto
13 pci-iov-howto 13 pci-iov-howto
14 msi-howto 14 msi-howto
15 acpi-info 15 acpi-info
diff --git a/Documentation/PCI/pci-error-recovery.rst b/Documentation/PCI/pci-error-recovery.rst
index 83db42092935..e5d450df06b4 100644
--- a/Documentation/PCI/pci-error-recovery.rst
+++ b/Documentation/PCI/pci-error-recovery.rst
@@ -403,7 +403,7 @@ That is, the recovery API only requires that:
403.. note:: 403.. note::
404 404
405 Implementation details for the powerpc platform are discussed in 405 Implementation details for the powerpc platform are discussed in
406 the file Documentation/powerpc/eeh-pci-error-recovery.txt 406 the file Documentation/powerpc/eeh-pci-error-recovery.rst
407 407
408 As of this writing, there is a growing list of device drivers with 408 As of this writing, there is a growing list of device drivers with
409 patches implementing error recovery. Not all of these patches are in 409 patches implementing error recovery. Not all of these patches are in
@@ -422,3 +422,6 @@ That is, the recovery API only requires that:
422 - drivers/net/cxgb3 422 - drivers/net/cxgb3
423 - drivers/net/s2io.c 423 - drivers/net/s2io.c
424 - drivers/net/qlge 424 - drivers/net/qlge
425
426The End
427-------
diff --git a/Documentation/PCI/picebus-howto.rst b/Documentation/PCI/pciebus-howto.rst
index f882ff62c51f..f882ff62c51f 100644
--- a/Documentation/PCI/picebus-howto.rst
+++ b/Documentation/PCI/pciebus-howto.rst
diff --git a/Documentation/RCU/rculist_nulls.txt b/Documentation/RCU/rculist_nulls.txt
index 8151f0195f76..23f115dc87cf 100644
--- a/Documentation/RCU/rculist_nulls.txt
+++ b/Documentation/RCU/rculist_nulls.txt
@@ -1,7 +1,7 @@
1Using hlist_nulls to protect read-mostly linked lists and 1Using hlist_nulls to protect read-mostly linked lists and
2objects using SLAB_TYPESAFE_BY_RCU allocations. 2objects using SLAB_TYPESAFE_BY_RCU allocations.
3 3
4Please read the basics in Documentation/RCU/listRCU.txt 4Please read the basics in Documentation/RCU/listRCU.rst
5 5
6Using special makers (called 'nulls') is a convenient way 6Using special makers (called 'nulls') is a convenient way
7to solve following problem : 7to solve following problem :
diff --git a/Documentation/admin-guide/conf.py b/Documentation/admin-guide/conf.py
deleted file mode 100644
index 86f738953799..000000000000
--- a/Documentation/admin-guide/conf.py
+++ /dev/null
@@ -1,10 +0,0 @@
1# -*- coding: utf-8; mode: python -*-
2
3project = 'Linux Kernel User Documentation'
4
5tags.add("subproject")
6
7latex_documents = [
8 ('index', 'linux-user.tex', 'Linux Kernel User Documentation',
9 'The kernel development community', 'manual'),
10]
diff --git a/Documentation/admin-guide/hw-vuln/spectre.rst b/Documentation/admin-guide/hw-vuln/spectre.rst
index 25f3b2532198..e05e581af5cf 100644
--- a/Documentation/admin-guide/hw-vuln/spectre.rst
+++ b/Documentation/admin-guide/hw-vuln/spectre.rst
@@ -41,10 +41,11 @@ Related CVEs
41 41
42The following CVE entries describe Spectre variants: 42The following CVE entries describe Spectre variants:
43 43
44 ============= ======================= ================= 44 ============= ======================= ==========================
45 CVE-2017-5753 Bounds check bypass Spectre variant 1 45 CVE-2017-5753 Bounds check bypass Spectre variant 1
46 CVE-2017-5715 Branch target injection Spectre variant 2 46 CVE-2017-5715 Branch target injection Spectre variant 2
47 ============= ======================= ================= 47 CVE-2019-1125 Spectre v1 swapgs Spectre variant 1 (swapgs)
48 ============= ======================= ==========================
48 49
49Problem 50Problem
50------- 51-------
@@ -78,6 +79,13 @@ There are some extensions of Spectre variant 1 attacks for reading data
78over the network, see :ref:`[12] <spec_ref12>`. However such attacks 79over the network, see :ref:`[12] <spec_ref12>`. However such attacks
79are difficult, low bandwidth, fragile, and are considered low risk. 80are difficult, low bandwidth, fragile, and are considered low risk.
80 81
82Note that, despite "Bounds Check Bypass" name, Spectre variant 1 is not
83only about user-controlled array bounds checks. It can affect any
84conditional checks. The kernel entry code interrupt, exception, and NMI
85handlers all have conditional swapgs checks. Those may be problematic
86in the context of Spectre v1, as kernel code can speculatively run with
87a user GS.
88
81Spectre variant 2 (Branch Target Injection) 89Spectre variant 2 (Branch Target Injection)
82------------------------------------------- 90-------------------------------------------
83 91
@@ -132,6 +140,9 @@ not cover all possible attack vectors.
1321. A user process attacking the kernel 1401. A user process attacking the kernel
133^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ 141^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
134 142
143Spectre variant 1
144~~~~~~~~~~~~~~~~~
145
135 The attacker passes a parameter to the kernel via a register or 146 The attacker passes a parameter to the kernel via a register or
136 via a known address in memory during a syscall. Such parameter may 147 via a known address in memory during a syscall. Such parameter may
137 be used later by the kernel as an index to an array or to derive 148 be used later by the kernel as an index to an array or to derive
@@ -144,7 +155,40 @@ not cover all possible attack vectors.
144 potentially be influenced for Spectre attacks, new "nospec" accessor 155 potentially be influenced for Spectre attacks, new "nospec" accessor
145 macros are used to prevent speculative loading of data. 156 macros are used to prevent speculative loading of data.
146 157
147 Spectre variant 2 attacker can :ref:`poison <poison_btb>` the branch 158Spectre variant 1 (swapgs)
159~~~~~~~~~~~~~~~~~~~~~~~~~~
160
161 An attacker can train the branch predictor to speculatively skip the
162 swapgs path for an interrupt or exception. If they initialize
163 the GS register to a user-space value and the swapgs is speculatively
164 skipped, subsequent GS-related percpu accesses in the speculation
165 window will be done with the attacker-controlled GS value. This
166 could cause privileged memory to be accessed and leaked.
167
168 For example:
169
170 ::
171
172 if (coming from user space)
173 swapgs
174 mov %gs:<percpu_offset>, %reg
175 mov (%reg), %reg1
176
177 When coming from user space, the CPU can speculatively skip the
178 swapgs, and then do a speculative percpu load using the user GS
179 value. So the user can speculatively force a read of any kernel
180 value. If a gadget exists which uses the percpu value as an address
181 in another load/store, then the contents of the kernel value may
182 become visible via an L1 side channel attack.
183
184 A similar attack exists when coming from kernel space. The CPU can
185 speculatively do the swapgs, causing the user GS to get used for the
186 rest of the speculative window.
187
188Spectre variant 2
189~~~~~~~~~~~~~~~~~
190
191 A spectre variant 2 attacker can :ref:`poison <poison_btb>` the branch
148 target buffer (BTB) before issuing syscall to launch an attack. 192 target buffer (BTB) before issuing syscall to launch an attack.
149 After entering the kernel, the kernel could use the poisoned branch 193 After entering the kernel, the kernel could use the poisoned branch
150 target buffer on indirect jump and jump to gadget code in speculative 194 target buffer on indirect jump and jump to gadget code in speculative
@@ -280,11 +324,18 @@ The sysfs file showing Spectre variant 1 mitigation status is:
280 324
281The possible values in this file are: 325The possible values in this file are:
282 326
283 ======================================= ================================= 327 .. list-table::
284 'Mitigation: __user pointer sanitation' Protection in kernel on a case by 328
285 case base with explicit pointer 329 * - 'Not affected'
286 sanitation. 330 - The processor is not vulnerable.
287 ======================================= ================================= 331 * - 'Vulnerable: __user pointer sanitization and usercopy barriers only; no swapgs barriers'
332 - The swapgs protections are disabled; otherwise it has
333 protection in the kernel on a case by case basis with explicit
334 pointer sanitation and usercopy LFENCE barriers.
335 * - 'Mitigation: usercopy/swapgs barriers and __user pointer sanitization'
336 - Protection in the kernel on a case by case basis with explicit
337 pointer sanitation, usercopy LFENCE barriers, and swapgs LFENCE
338 barriers.
288 339
289However, the protections are put in place on a case by case basis, 340However, the protections are put in place on a case by case basis,
290and there is no guarantee that all possible attack vectors for Spectre 341and there is no guarantee that all possible attack vectors for Spectre
@@ -366,12 +417,27 @@ Turning on mitigation for Spectre variant 1 and Spectre variant 2
3661. Kernel mitigation 4171. Kernel mitigation
367^^^^^^^^^^^^^^^^^^^^ 418^^^^^^^^^^^^^^^^^^^^
368 419
420Spectre variant 1
421~~~~~~~~~~~~~~~~~
422
369 For the Spectre variant 1, vulnerable kernel code (as determined 423 For the Spectre variant 1, vulnerable kernel code (as determined
370 by code audit or scanning tools) is annotated on a case by case 424 by code audit or scanning tools) is annotated on a case by case
371 basis to use nospec accessor macros for bounds clipping :ref:`[2] 425 basis to use nospec accessor macros for bounds clipping :ref:`[2]
372 <spec_ref2>` to avoid any usable disclosure gadgets. However, it may 426 <spec_ref2>` to avoid any usable disclosure gadgets. However, it may
373 not cover all attack vectors for Spectre variant 1. 427 not cover all attack vectors for Spectre variant 1.
374 428
429 Copy-from-user code has an LFENCE barrier to prevent the access_ok()
430 check from being mis-speculated. The barrier is done by the
431 barrier_nospec() macro.
432
433 For the swapgs variant of Spectre variant 1, LFENCE barriers are
434 added to interrupt, exception and NMI entry where needed. These
435 barriers are done by the FENCE_SWAPGS_KERNEL_ENTRY and
436 FENCE_SWAPGS_USER_ENTRY macros.
437
438Spectre variant 2
439~~~~~~~~~~~~~~~~~
440
375 For Spectre variant 2 mitigation, the compiler turns indirect calls or 441 For Spectre variant 2 mitigation, the compiler turns indirect calls or
376 jumps in the kernel into equivalent return trampolines (retpolines) 442 jumps in the kernel into equivalent return trampolines (retpolines)
377 :ref:`[3] <spec_ref3>` :ref:`[9] <spec_ref9>` to go to the target 443 :ref:`[3] <spec_ref3>` :ref:`[9] <spec_ref9>` to go to the target
@@ -473,6 +539,12 @@ Mitigation control on the kernel command line
473Spectre variant 2 mitigation can be disabled or force enabled at the 539Spectre variant 2 mitigation can be disabled or force enabled at the
474kernel command line. 540kernel command line.
475 541
542 nospectre_v1
543
544 [X86,PPC] Disable mitigations for Spectre Variant 1
545 (bounds check bypass). With this option data leaks are
546 possible in the system.
547
476 nospectre_v2 548 nospectre_v2
477 549
478 [X86] Disable all mitigations for the Spectre variant 2 550 [X86] Disable all mitigations for the Spectre variant 2
diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
index 46b826fcb5ad..4c1971960afa 100644
--- a/Documentation/admin-guide/kernel-parameters.txt
+++ b/Documentation/admin-guide/kernel-parameters.txt
@@ -2545,7 +2545,7 @@
2545 mem_encrypt=on: Activate SME 2545 mem_encrypt=on: Activate SME
2546 mem_encrypt=off: Do not activate SME 2546 mem_encrypt=off: Do not activate SME
2547 2547
2548 Refer to Documentation/virtual/kvm/amd-memory-encryption.rst 2548 Refer to Documentation/virt/kvm/amd-memory-encryption.rst
2549 for details on when memory encryption can be activated. 2549 for details on when memory encryption can be activated.
2550 2550
2551 mem_sleep_default= [SUSPEND] Default system suspend mode: 2551 mem_sleep_default= [SUSPEND] Default system suspend mode:
@@ -2604,7 +2604,7 @@
2604 expose users to several CPU vulnerabilities. 2604 expose users to several CPU vulnerabilities.
2605 Equivalent to: nopti [X86,PPC] 2605 Equivalent to: nopti [X86,PPC]
2606 kpti=0 [ARM64] 2606 kpti=0 [ARM64]
2607 nospectre_v1 [PPC] 2607 nospectre_v1 [X86,PPC]
2608 nobp=0 [S390] 2608 nobp=0 [S390]
2609 nospectre_v2 [X86,PPC,S390,ARM64] 2609 nospectre_v2 [X86,PPC,S390,ARM64]
2610 spectre_v2_user=off [X86] 2610 spectre_v2_user=off [X86]
@@ -2965,9 +2965,9 @@
2965 nosmt=force: Force disable SMT, cannot be undone 2965 nosmt=force: Force disable SMT, cannot be undone
2966 via the sysfs control file. 2966 via the sysfs control file.
2967 2967
2968 nospectre_v1 [PPC] Disable mitigations for Spectre Variant 1 (bounds 2968 nospectre_v1 [X86,PPC] Disable mitigations for Spectre Variant 1
2969 check bypass). With this option data leaks are possible 2969 (bounds check bypass). With this option data leaks are
2970 in the system. 2970 possible in the system.
2971 2971
2972 nospectre_v2 [X86,PPC_FSL_BOOK3E,ARM64] Disable all mitigations for 2972 nospectre_v2 [X86,PPC_FSL_BOOK3E,ARM64] Disable all mitigations for
2973 the Spectre variant 2 (indirect branch prediction) 2973 the Spectre variant 2 (indirect branch prediction)
@@ -4090,6 +4090,13 @@
4090 Run specified binary instead of /init from the ramdisk, 4090 Run specified binary instead of /init from the ramdisk,
4091 used for early userspace startup. See initrd. 4091 used for early userspace startup. See initrd.
4092 4092
4093 rdrand= [X86]
4094 force - Override the decision by the kernel to hide the
4095 advertisement of RDRAND support (this affects
4096 certain AMD processors because of buggy BIOS
4097 support, specifically around the suspend/resume
4098 path).
4099
4093 rdt= [HW,X86,RDT] 4100 rdt= [HW,X86,RDT]
4094 Turn on/off individual RDT features. List is: 4101 Turn on/off individual RDT features. List is:
4095 cmt, mbmtotal, mbmlocal, l3cat, l3cdp, l2cat, l2cdp, 4102 cmt, mbmtotal, mbmlocal, l3cat, l3cdp, l2cat, l2cdp,
diff --git a/Documentation/admin-guide/mm/transhuge.rst b/Documentation/admin-guide/mm/transhuge.rst
index 7ab93a8404b9..bd5714547cee 100644
--- a/Documentation/admin-guide/mm/transhuge.rst
+++ b/Documentation/admin-guide/mm/transhuge.rst
@@ -53,7 +53,7 @@ disabled, there is ``khugepaged`` daemon that scans memory and
53collapses sequences of basic pages into huge pages. 53collapses sequences of basic pages into huge pages.
54 54
55The THP behaviour is controlled via :ref:`sysfs <thp_sysfs>` 55The THP behaviour is controlled via :ref:`sysfs <thp_sysfs>`
56interface and using madivse(2) and prctl(2) system calls. 56interface and using madvise(2) and prctl(2) system calls.
57 57
58Transparent Hugepage Support maximizes the usefulness of free memory 58Transparent Hugepage Support maximizes the usefulness of free memory
59if compared to the reservation approach of hugetlbfs by allowing all 59if compared to the reservation approach of hugetlbfs by allowing all
diff --git a/Documentation/admin-guide/sysctl/net.rst b/Documentation/admin-guide/sysctl/net.rst
index a7d44e71019d..287b98708a40 100644
--- a/Documentation/admin-guide/sysctl/net.rst
+++ b/Documentation/admin-guide/sysctl/net.rst
@@ -39,7 +39,6 @@ Table : Subdirectories in /proc/sys/net
39 802 E802 protocol ax25 AX25 39 802 E802 protocol ax25 AX25
40 ethernet Ethernet protocol rose X.25 PLP layer 40 ethernet Ethernet protocol rose X.25 PLP layer
41 ipv4 IP version 4 x25 X.25 protocol 41 ipv4 IP version 4 x25 X.25 protocol
42 ipx IPX token-ring IBM token ring
43 bridge Bridging decnet DEC net 42 bridge Bridging decnet DEC net
44 ipv6 IP version 6 tipc TIPC 43 ipv6 IP version 6 tipc TIPC
45 ========= =================== = ========== ================== 44 ========= =================== = ========== ==================
@@ -401,33 +400,7 @@ interface.
401(network) that the route leads to, the router (may be directly connected), the 400(network) that the route leads to, the router (may be directly connected), the
402route flags, and the device the route is using. 401route flags, and the device the route is using.
403 402
404 4035. TIPC
4055. IPX
406------
407
408The IPX protocol has no tunable values in proc/sys/net.
409
410The IPX protocol does, however, provide proc/net/ipx. This lists each IPX
411socket giving the local and remote addresses in Novell format (that is
412network:node:port). In accordance with the strange Novell tradition,
413everything but the port is in hex. Not_Connected is displayed for sockets that
414are not tied to a specific remote address. The Tx and Rx queue sizes indicate
415the number of bytes pending for transmission and reception. The state
416indicates the state the socket is in and the uid is the owning uid of the
417socket.
418
419The /proc/net/ipx_interface file lists all IPX interfaces. For each interface
420it gives the network number, the node number, and indicates if the network is
421the primary network. It also indicates which device it is bound to (or
422Internal for internal networks) and the Frame Type if appropriate. Linux
423supports 802.3, 802.2, 802.2 SNAP and DIX (Blue Book) ethernet framing for
424IPX.
425
426The /proc/net/ipx_route table holds a list of IPX routes. For each route it
427gives the destination network, the router node (or Directly) and the network
428address of the router (or Connected) for internal networks.
429
4306. TIPC
431------- 404-------
432 405
433tipc_rmem 406tipc_rmem
diff --git a/Documentation/conf.py b/Documentation/conf.py
index 3b2397bcb565..a8fe845832bc 100644
--- a/Documentation/conf.py
+++ b/Documentation/conf.py
@@ -16,6 +16,8 @@ import sys
16import os 16import os
17import sphinx 17import sphinx
18 18
19from subprocess import check_output
20
19# Get Sphinx version 21# Get Sphinx version
20major, minor, patch = sphinx.version_info[:3] 22major, minor, patch = sphinx.version_info[:3]
21 23
@@ -276,10 +278,21 @@ latex_elements = {
276 \\setsansfont{DejaVu Sans} 278 \\setsansfont{DejaVu Sans}
277 \\setromanfont{DejaVu Serif} 279 \\setromanfont{DejaVu Serif}
278 \\setmonofont{DejaVu Sans Mono} 280 \\setmonofont{DejaVu Sans Mono}
279
280 ''' 281 '''
281} 282}
282 283
284# At least one book (translations) may have Asian characters
285# which are only displayed if xeCJK is used
286
287cjk_cmd = check_output(['fc-list', '--format="%{family[0]}\n"']).decode('utf-8', 'ignore')
288if cjk_cmd.find("Noto Sans CJK SC") >= 0:
289 print ("enabling CJK for LaTeX builder")
290 latex_elements['preamble'] += '''
291 % This is needed for translations
292 \\usepackage{xeCJK}
293 \\setCJKmainfont{Noto Sans CJK SC}
294 '''
295
283# Fix reference escape troubles with Sphinx 1.4.x 296# Fix reference escape troubles with Sphinx 1.4.x
284if major == 1 and minor > 3: 297if major == 1 and minor > 3:
285 latex_elements['preamble'] += '\\renewcommand*{\\DUrole}[2]{ #2 }\n' 298 latex_elements['preamble'] += '\\renewcommand*{\\DUrole}[2]{ #2 }\n'
@@ -410,6 +423,21 @@ latex_documents = [
410 'The kernel development community', 'manual'), 423 'The kernel development community', 'manual'),
411] 424]
412 425
426# Add all other index files from Documentation/ subdirectories
427for fn in os.listdir('.'):
428 doc = os.path.join(fn, "index")
429 if os.path.exists(doc + ".rst"):
430 has = False
431 for l in latex_documents:
432 if l[0] == doc:
433 has = True
434 break
435 if not has:
436 latex_documents.append((doc, fn + '.tex',
437 'Linux %s Documentation' % fn.capitalize(),
438 'The kernel development community',
439 'manual'))
440
413# The name of an image file (relative to this directory) to place at the top of 441# The name of an image file (relative to this directory) to place at the top of
414# the title page. 442# the title page.
415#latex_logo = None 443#latex_logo = None
diff --git a/Documentation/core-api/conf.py b/Documentation/core-api/conf.py
deleted file mode 100644
index db1f7659f3da..000000000000
--- a/Documentation/core-api/conf.py
+++ /dev/null
@@ -1,10 +0,0 @@
1# -*- coding: utf-8; mode: python -*-
2
3project = "Core-API Documentation"
4
5tags.add("subproject")
6
7latex_documents = [
8 ('index', 'core-api.tex', project,
9 'The kernel development community', 'manual'),
10]
diff --git a/Documentation/crypto/conf.py b/Documentation/crypto/conf.py
deleted file mode 100644
index 4335d251ddf3..000000000000
--- a/Documentation/crypto/conf.py
+++ /dev/null
@@ -1,10 +0,0 @@
1# -*- coding: utf-8; mode: python -*-
2
3project = 'Linux Kernel Crypto API'
4
5tags.add("subproject")
6
7latex_documents = [
8 ('index', 'crypto-api.tex', 'Linux Kernel Crypto API manual',
9 'The kernel development community', 'manual'),
10]
diff --git a/Documentation/dev-tools/conf.py b/Documentation/dev-tools/conf.py
deleted file mode 100644
index 7faafa3f7888..000000000000
--- a/Documentation/dev-tools/conf.py
+++ /dev/null
@@ -1,10 +0,0 @@
1# -*- coding: utf-8; mode: python -*-
2
3project = "Development tools for the kernel"
4
5tags.add("subproject")
6
7latex_documents = [
8 ('index', 'dev-tools.tex', project,
9 'The kernel development community', 'manual'),
10]
diff --git a/Documentation/devicetree/bindings/Makefile b/Documentation/devicetree/bindings/Makefile
index 6b0dfd5c17ba..5138a2f6232a 100644
--- a/Documentation/devicetree/bindings/Makefile
+++ b/Documentation/devicetree/bindings/Makefile
@@ -19,7 +19,9 @@ quiet_cmd_mk_schema = SCHEMA $@
19 19
20DT_DOCS = $(shell \ 20DT_DOCS = $(shell \
21 cd $(srctree)/$(src) && \ 21 cd $(srctree)/$(src) && \
22 find * \( -name '*.yaml' ! -name $(DT_TMP_SCHEMA) \) \ 22 find * \( -name '*.yaml' ! \
23 -name $(DT_TMP_SCHEMA) ! \
24 -name '*.example.dt.yaml' \) \
23 ) 25 )
24 26
25DT_SCHEMA_FILES ?= $(addprefix $(src)/,$(DT_DOCS)) 27DT_SCHEMA_FILES ?= $(addprefix $(src)/,$(DT_DOCS))
diff --git a/Documentation/devicetree/bindings/arm/idle-states.txt b/Documentation/devicetree/bindings/arm/idle-states.txt
index 326f29b270ad..2d325bed37e5 100644
--- a/Documentation/devicetree/bindings/arm/idle-states.txt
+++ b/Documentation/devicetree/bindings/arm/idle-states.txt
@@ -703,4 +703,4 @@ cpus {
703 https://www.devicetree.org/specifications/ 703 https://www.devicetree.org/specifications/
704 704
705[6] ARM Linux Kernel documentation - Booting AArch64 Linux 705[6] ARM Linux Kernel documentation - Booting AArch64 Linux
706 Documentation/arm64/booting.txt 706 Documentation/arm64/booting.rst
diff --git a/Documentation/devicetree/bindings/arm/renesas.yaml b/Documentation/devicetree/bindings/arm/renesas.yaml
index 08c923f8c257..28eb458f761a 100644
--- a/Documentation/devicetree/bindings/arm/renesas.yaml
+++ b/Documentation/devicetree/bindings/arm/renesas.yaml
@@ -1,7 +1,7 @@
1# SPDX-License-Identifier: GPL-2.0 1# SPDX-License-Identifier: GPL-2.0
2%YAML 1.2 2%YAML 1.2
3--- 3---
4$id: http://devicetree.org/schemas/arm/shmobile.yaml# 4$id: http://devicetree.org/schemas/arm/renesas.yaml#
5$schema: http://devicetree.org/meta-schemas/core.yaml# 5$schema: http://devicetree.org/meta-schemas/core.yaml#
6 6
7title: Renesas SH-Mobile, R-Mobile, and R-Car Platform Device Tree Bindings 7title: Renesas SH-Mobile, R-Mobile, and R-Car Platform Device Tree Bindings
diff --git a/Documentation/devicetree/bindings/arm/socionext/milbeaut.yaml b/Documentation/devicetree/bindings/arm/socionext/milbeaut.yaml
index aae53fc3cb1e..2bd519d2e855 100644
--- a/Documentation/devicetree/bindings/arm/socionext/milbeaut.yaml
+++ b/Documentation/devicetree/bindings/arm/socionext/milbeaut.yaml
@@ -1,7 +1,7 @@
1# SPDX-License-Identifier: GPL-2.0 1# SPDX-License-Identifier: GPL-2.0
2%YAML 1.2 2%YAML 1.2
3--- 3---
4$id: http://devicetree.org/schemas/arm/milbeaut.yaml# 4$id: http://devicetree.org/schemas/arm/socionext/milbeaut.yaml#
5$schema: http://devicetree.org/meta-schemas/core.yaml# 5$schema: http://devicetree.org/meta-schemas/core.yaml#
6 6
7title: Milbeaut platforms device tree bindings 7title: Milbeaut platforms device tree bindings
diff --git a/Documentation/devicetree/bindings/arm/ti/ti,davinci.yaml b/Documentation/devicetree/bindings/arm/ti/ti,davinci.yaml
index 4326d2cfa15d..a8765ba29476 100644
--- a/Documentation/devicetree/bindings/arm/ti/ti,davinci.yaml
+++ b/Documentation/devicetree/bindings/arm/ti/ti,davinci.yaml
@@ -1,7 +1,7 @@
1# SPDX-License-Identifier: GPL-2.0 1# SPDX-License-Identifier: GPL-2.0
2%YAML 1.2 2%YAML 1.2
3--- 3---
4$id: http://devicetree.org/schemas/arm/ti/davinci.yaml# 4$id: http://devicetree.org/schemas/arm/ti/ti,davinci.yaml#
5$schema: http://devicetree.org/meta-schemas/core.yaml# 5$schema: http://devicetree.org/meta-schemas/core.yaml#
6 6
7title: Texas Instruments DaVinci Platforms Device Tree Bindings 7title: Texas Instruments DaVinci Platforms Device Tree Bindings
diff --git a/Documentation/devicetree/bindings/clock/allwinner,sun4i-a10-ccu.yaml b/Documentation/devicetree/bindings/clock/allwinner,sun4i-a10-ccu.yaml
index c935405458fe..fa4d143a73de 100644
--- a/Documentation/devicetree/bindings/clock/allwinner,sun4i-a10-ccu.yaml
+++ b/Documentation/devicetree/bindings/clock/allwinner,sun4i-a10-ccu.yaml
@@ -1,7 +1,7 @@
1# SPDX-License-Identifier: GPL-2.0 1# SPDX-License-Identifier: GPL-2.0
2%YAML 1.2 2%YAML 1.2
3--- 3---
4$id: http://devicetree.org/schemas/phy/allwinner,sun4i-a10-ccu.yaml# 4$id: http://devicetree.org/schemas/clock/allwinner,sun4i-a10-ccu.yaml#
5$schema: http://devicetree.org/meta-schemas/core.yaml# 5$schema: http://devicetree.org/meta-schemas/core.yaml#
6 6
7title: Allwinner Clock Control Unit Device Tree Bindings 7title: Allwinner Clock Control Unit Device Tree Bindings
diff --git a/Documentation/devicetree/bindings/firmware/intel,ixp4xx-network-processing-engine.yaml b/Documentation/devicetree/bindings/firmware/intel,ixp4xx-network-processing-engine.yaml
index 8cb136c376fb..4f0db8ee226a 100644
--- a/Documentation/devicetree/bindings/firmware/intel,ixp4xx-network-processing-engine.yaml
+++ b/Documentation/devicetree/bindings/firmware/intel,ixp4xx-network-processing-engine.yaml
@@ -2,7 +2,7 @@
2# Copyright 2019 Linaro Ltd. 2# Copyright 2019 Linaro Ltd.
3%YAML 1.2 3%YAML 1.2
4--- 4---
5$id: "http://devicetree.org/schemas/firmware/intel-ixp4xx-network-processing-engine.yaml#" 5$id: "http://devicetree.org/schemas/firmware/intel,ixp4xx-network-processing-engine.yaml#"
6$schema: "http://devicetree.org/meta-schemas/core.yaml#" 6$schema: "http://devicetree.org/meta-schemas/core.yaml#"
7 7
8title: Intel IXP4xx Network Processing Engine 8title: Intel IXP4xx Network Processing Engine
diff --git a/Documentation/devicetree/bindings/iio/accel/adi,adxl345.yaml b/Documentation/devicetree/bindings/iio/accel/adi,adxl345.yaml
index 7ba167e2e1ea..c602b6fe1c0c 100644
--- a/Documentation/devicetree/bindings/iio/accel/adi,adxl345.yaml
+++ b/Documentation/devicetree/bindings/iio/accel/adi,adxl345.yaml
@@ -1,7 +1,7 @@
1# SPDX-License-Identifier: GPL-2.0 1# SPDX-License-Identifier: GPL-2.0
2%YAML 1.2 2%YAML 1.2
3--- 3---
4$id: http://devicetree.org/schemas/iio/accelerometers/adi,adxl345.yaml# 4$id: http://devicetree.org/schemas/iio/accel/adi,adxl345.yaml#
5$schema: http://devicetree.org/meta-schemas/core.yaml# 5$schema: http://devicetree.org/meta-schemas/core.yaml#
6 6
7title: Analog Devices ADXL345/ADXL375 3-Axis Digital Accelerometers 7title: Analog Devices ADXL345/ADXL375 3-Axis Digital Accelerometers
diff --git a/Documentation/devicetree/bindings/iio/accel/adi,adxl372.yaml b/Documentation/devicetree/bindings/iio/accel/adi,adxl372.yaml
index a7fafb9bf5c6..e7daffec88d3 100644
--- a/Documentation/devicetree/bindings/iio/accel/adi,adxl372.yaml
+++ b/Documentation/devicetree/bindings/iio/accel/adi,adxl372.yaml
@@ -1,7 +1,7 @@
1# SPDX-License-Identifier: GPL-2.0 1# SPDX-License-Identifier: GPL-2.0
2%YAML 1.2 2%YAML 1.2
3--- 3---
4$id: http://devicetree.org/schemas/iio/accelerometers/adi,adxl372.yaml# 4$id: http://devicetree.org/schemas/iio/accel/adi,adxl372.yaml#
5$schema: http://devicetree.org/meta-schemas/core.yaml# 5$schema: http://devicetree.org/meta-schemas/core.yaml#
6 6
7title: Analog Devices ADXL372 3-Axis, +/-(200g) Digital Accelerometer 7title: Analog Devices ADXL372 3-Axis, +/-(200g) Digital Accelerometer
diff --git a/Documentation/devicetree/bindings/interrupt-controller/amazon,al-fic.txt b/Documentation/devicetree/bindings/interrupt-controller/amazon,al-fic.txt
index 4e82fd575cec..c676b03c752e 100644
--- a/Documentation/devicetree/bindings/interrupt-controller/amazon,al-fic.txt
+++ b/Documentation/devicetree/bindings/interrupt-controller/amazon,al-fic.txt
@@ -5,21 +5,19 @@ Required properties:
5- compatible: should be "amazon,al-fic" 5- compatible: should be "amazon,al-fic"
6- reg: physical base address and size of the registers 6- reg: physical base address and size of the registers
7- interrupt-controller: identifies the node as an interrupt controller 7- interrupt-controller: identifies the node as an interrupt controller
8- #interrupt-cells: must be 2. 8- #interrupt-cells : must be 2. Specifies the number of cells needed to encode
9 First cell defines the index of the interrupt within the controller. 9 an interrupt source. Supported trigger types are low-to-high edge
10 Second cell is used to specify the trigger type and must be one of the 10 triggered and active high level-sensitive.
11 following:
12 - bits[3:0] trigger type and level flags
13 1 = low-to-high edge triggered
14 4 = active high level-sensitive
15- interrupt-parent: specifies the parent interrupt controller.
16- interrupts: describes which input line in the interrupt parent, this 11- interrupts: describes which input line in the interrupt parent, this
17 fic's output is connected to. This field property depends on the parent's 12 fic's output is connected to. This field property depends on the parent's
18 binding 13 binding
19 14
15Please refer to interrupts.txt in this directory for details of the common
16Interrupt Controllers bindings used by client devices.
17
20Example: 18Example:
21 19
22amazon_fic: interrupt-controller@0xfd8a8500 { 20amazon_fic: interrupt-controller@fd8a8500 {
23 compatible = "amazon,al-fic"; 21 compatible = "amazon,al-fic";
24 interrupt-controller; 22 interrupt-controller;
25 #interrupt-cells = <2>; 23 #interrupt-cells = <2>;
diff --git a/Documentation/devicetree/bindings/interrupt-controller/intel,ixp4xx-interrupt.yaml b/Documentation/devicetree/bindings/interrupt-controller/intel,ixp4xx-interrupt.yaml
index bae10e261fa9..507c141ea760 100644
--- a/Documentation/devicetree/bindings/interrupt-controller/intel,ixp4xx-interrupt.yaml
+++ b/Documentation/devicetree/bindings/interrupt-controller/intel,ixp4xx-interrupt.yaml
@@ -2,7 +2,7 @@
2# Copyright 2018 Linaro Ltd. 2# Copyright 2018 Linaro Ltd.
3%YAML 1.2 3%YAML 1.2
4--- 4---
5$id: "http://devicetree.org/schemas/interrupt/intel-ixp4xx-interrupt.yaml#" 5$id: "http://devicetree.org/schemas/interrupt-controller/intel,ixp4xx-interrupt.yaml#"
6$schema: "http://devicetree.org/meta-schemas/core.yaml#" 6$schema: "http://devicetree.org/meta-schemas/core.yaml#"
7 7
8title: Intel IXP4xx XScale Networking Processors Interrupt Controller 8title: Intel IXP4xx XScale Networking Processors Interrupt Controller
diff --git a/Documentation/devicetree/bindings/interrupt-controller/snps,archs-idu-intc.txt b/Documentation/devicetree/bindings/interrupt-controller/snps,archs-idu-intc.txt
index 09fc02b99845..a5c1db95b3ec 100644
--- a/Documentation/devicetree/bindings/interrupt-controller/snps,archs-idu-intc.txt
+++ b/Documentation/devicetree/bindings/interrupt-controller/snps,archs-idu-intc.txt
@@ -1,20 +1,30 @@
1* ARC-HS Interrupt Distribution Unit 1* ARC-HS Interrupt Distribution Unit
2 2
3 This optional 2nd level interrupt controller can be used in SMP configurations for 3 This optional 2nd level interrupt controller can be used in SMP configurations
4 dynamic IRQ routing, load balancing of common/external IRQs towards core intc. 4 for dynamic IRQ routing, load balancing of common/external IRQs towards core
5 intc.
5 6
6Properties: 7Properties:
7 8
8- compatible: "snps,archs-idu-intc" 9- compatible: "snps,archs-idu-intc"
9- interrupt-controller: This is an interrupt controller. 10- interrupt-controller: This is an interrupt controller.
10- #interrupt-cells: Must be <1>. 11- #interrupt-cells: Must be <1> or <2>.
11 12
12 Value of the cell specifies the "common" IRQ from peripheral to IDU. Number N 13 Value of the first cell specifies the "common" IRQ from peripheral to IDU.
13 of the particular interrupt line of IDU corresponds to the line N+24 of the 14 Number N of the particular interrupt line of IDU corresponds to the line N+24
14 core interrupt controller. 15 of the core interrupt controller.
15 16
16 intc accessed via the special ARC AUX register interface, hence "reg" property 17 The (optional) second cell specifies any of the following flags:
17 is not specified. 18 - bits[3:0] trigger type and level flags
19 1 = low-to-high edge triggered
20 2 = NOT SUPPORTED (high-to-low edge triggered)
21 4 = active high level-sensitive <<< DEFAULT
22 8 = NOT SUPPORTED (active low level-sensitive)
23 When no second cell is specified, the interrupt is assumed to be level
24 sensitive.
25
26 The interrupt controller is accessed via the special ARC AUX register
27 interface, hence "reg" property is not specified.
18 28
19Example: 29Example:
20 core_intc: core-interrupt-controller { 30 core_intc: core-interrupt-controller {
diff --git a/Documentation/devicetree/bindings/misc/intel,ixp4xx-queue-manager.yaml b/Documentation/devicetree/bindings/misc/intel,ixp4xx-ahb-queue-manager.yaml
index d2313b1d9405..0ea21a6f70b4 100644
--- a/Documentation/devicetree/bindings/misc/intel,ixp4xx-queue-manager.yaml
+++ b/Documentation/devicetree/bindings/misc/intel,ixp4xx-ahb-queue-manager.yaml
@@ -2,7 +2,7 @@
2# Copyright 2019 Linaro Ltd. 2# Copyright 2019 Linaro Ltd.
3%YAML 1.2 3%YAML 1.2
4--- 4---
5$id: "http://devicetree.org/schemas/misc/intel-ixp4xx-ahb-queue-manager.yaml#" 5$id: "http://devicetree.org/schemas/misc/intel,ixp4xx-ahb-queue-manager.yaml#"
6$schema: "http://devicetree.org/meta-schemas/core.yaml#" 6$schema: "http://devicetree.org/meta-schemas/core.yaml#"
7 7
8title: Intel IXP4xx AHB Queue Manager 8title: Intel IXP4xx AHB Queue Manager
diff --git a/Documentation/devicetree/bindings/net/allwinner,sun8i-a83t-emac.yaml b/Documentation/devicetree/bindings/net/allwinner,sun8i-a83t-emac.yaml
index d4084c149768..3fb0714e761e 100644
--- a/Documentation/devicetree/bindings/net/allwinner,sun8i-a83t-emac.yaml
+++ b/Documentation/devicetree/bindings/net/allwinner,sun8i-a83t-emac.yaml
@@ -1,7 +1,7 @@
1# SPDX-License-Identifier: GPL-2.0 1# SPDX-License-Identifier: GPL-2.0
2%YAML 1.2 2%YAML 1.2
3--- 3---
4$id: http://devicetree.org/schemas/net/allwinner,sun8i-a83t-gmac.yaml# 4$id: http://devicetree.org/schemas/net/allwinner,sun8i-a83t-emac.yaml#
5$schema: http://devicetree.org/meta-schemas/core.yaml# 5$schema: http://devicetree.org/meta-schemas/core.yaml#
6 6
7title: Allwinner A83t EMAC Device Tree Bindings 7title: Allwinner A83t EMAC Device Tree Bindings
diff --git a/Documentation/devicetree/bindings/net/dsa/ksz.txt b/Documentation/devicetree/bindings/net/dsa/ksz.txt
index 4ac21cef370e..113e7ac79aad 100644
--- a/Documentation/devicetree/bindings/net/dsa/ksz.txt
+++ b/Documentation/devicetree/bindings/net/dsa/ksz.txt
@@ -12,6 +12,7 @@ Required properties:
12 - "microchip,ksz8565" 12 - "microchip,ksz8565"
13 - "microchip,ksz9893" 13 - "microchip,ksz9893"
14 - "microchip,ksz9563" 14 - "microchip,ksz9563"
15 - "microchip,ksz8563"
15 16
16Optional properties: 17Optional properties:
17 18
diff --git a/Documentation/devicetree/bindings/net/fsl-fec.txt b/Documentation/devicetree/bindings/net/fsl-fec.txt
index 2d41fb96ce0a..5b88fae0307d 100644
--- a/Documentation/devicetree/bindings/net/fsl-fec.txt
+++ b/Documentation/devicetree/bindings/net/fsl-fec.txt
@@ -7,18 +7,6 @@ Required properties:
7- phy-mode : See ethernet.txt file in the same directory 7- phy-mode : See ethernet.txt file in the same directory
8 8
9Optional properties: 9Optional properties:
10- phy-reset-gpios : Should specify the gpio for phy reset
11- phy-reset-duration : Reset duration in milliseconds. Should present
12 only if property "phy-reset-gpios" is available. Missing the property
13 will have the duration be 1 millisecond. Numbers greater than 1000 are
14 invalid and 1 millisecond will be used instead.
15- phy-reset-active-high : If present then the reset sequence using the GPIO
16 specified in the "phy-reset-gpios" property is reversed (H=reset state,
17 L=operation state).
18- phy-reset-post-delay : Post reset delay in milliseconds. If present then
19 a delay of phy-reset-post-delay milliseconds will be observed after the
20 phy-reset-gpios has been toggled. Can be omitted thus no delay is
21 observed. Delay is in range of 1ms to 1000ms. Other delays are invalid.
22- phy-supply : regulator that powers the Ethernet PHY. 10- phy-supply : regulator that powers the Ethernet PHY.
23- phy-handle : phandle to the PHY device connected to this device. 11- phy-handle : phandle to the PHY device connected to this device.
24- fixed-link : Assume a fixed link. See fixed-link.txt in the same directory. 12- fixed-link : Assume a fixed link. See fixed-link.txt in the same directory.
@@ -47,11 +35,27 @@ Optional properties:
47 For imx6sx, "int0" handles all 3 queues and ENET_MII. "pps" is for the pulse 35 For imx6sx, "int0" handles all 3 queues and ENET_MII. "pps" is for the pulse
48 per second interrupt associated with 1588 precision time protocol(PTP). 36 per second interrupt associated with 1588 precision time protocol(PTP).
49 37
50
51Optional subnodes: 38Optional subnodes:
52- mdio : specifies the mdio bus in the FEC, used as a container for phy nodes 39- mdio : specifies the mdio bus in the FEC, used as a container for phy nodes
53 according to phy.txt in the same directory 40 according to phy.txt in the same directory
54 41
42Deprecated optional properties:
43 To avoid these, create a phy node according to phy.txt in the same
44 directory, and point the fec's "phy-handle" property to it. Then use
45 the phy's reset binding, again described by phy.txt.
46- phy-reset-gpios : Should specify the gpio for phy reset
47- phy-reset-duration : Reset duration in milliseconds. Should present
48 only if property "phy-reset-gpios" is available. Missing the property
49 will have the duration be 1 millisecond. Numbers greater than 1000 are
50 invalid and 1 millisecond will be used instead.
51- phy-reset-active-high : If present then the reset sequence using the GPIO
52 specified in the "phy-reset-gpios" property is reversed (H=reset state,
53 L=operation state).
54- phy-reset-post-delay : Post reset delay in milliseconds. If present then
55 a delay of phy-reset-post-delay milliseconds will be observed after the
56 phy-reset-gpios has been toggled. Can be omitted thus no delay is
57 observed. Delay is in range of 1ms to 1000ms. Other delays are invalid.
58
55Example: 59Example:
56 60
57ethernet@83fec000 { 61ethernet@83fec000 {
diff --git a/Documentation/devicetree/bindings/net/macb.txt b/Documentation/devicetree/bindings/net/macb.txt
index 63c73fafe26d..0b61a90f1592 100644
--- a/Documentation/devicetree/bindings/net/macb.txt
+++ b/Documentation/devicetree/bindings/net/macb.txt
@@ -15,10 +15,10 @@ Required properties:
15 Use "atmel,sama5d4-gem" for the GEM IP (10/100) available on Atmel sama5d4 SoCs. 15 Use "atmel,sama5d4-gem" for the GEM IP (10/100) available on Atmel sama5d4 SoCs.
16 Use "cdns,zynq-gem" Xilinx Zynq-7xxx SoC. 16 Use "cdns,zynq-gem" Xilinx Zynq-7xxx SoC.
17 Use "cdns,zynqmp-gem" for Zynq Ultrascale+ MPSoC. 17 Use "cdns,zynqmp-gem" for Zynq Ultrascale+ MPSoC.
18 Use "sifive,fu540-macb" for SiFive FU540-C000 SoC. 18 Use "sifive,fu540-c000-gem" for SiFive FU540-C000 SoC.
19 Or the generic form: "cdns,emac". 19 Or the generic form: "cdns,emac".
20- reg: Address and length of the register set for the device 20- reg: Address and length of the register set for the device
21 For "sifive,fu540-macb", second range is required to specify the 21 For "sifive,fu540-c000-gem", second range is required to specify the
22 address and length of the registers for GEMGXL Management block. 22 address and length of the registers for GEMGXL Management block.
23- interrupts: Should contain macb interrupt 23- interrupts: Should contain macb interrupt
24- phy-mode: See ethernet.txt file in the same directory. 24- phy-mode: See ethernet.txt file in the same directory.
diff --git a/Documentation/devicetree/bindings/nvmem/allwinner,sun4i-a10-sid.yaml b/Documentation/devicetree/bindings/nvmem/allwinner,sun4i-a10-sid.yaml
index c9efd6e2c134..1084e9d2917d 100644
--- a/Documentation/devicetree/bindings/nvmem/allwinner,sun4i-a10-sid.yaml
+++ b/Documentation/devicetree/bindings/nvmem/allwinner,sun4i-a10-sid.yaml
@@ -37,13 +37,13 @@ required:
37 37
38examples: 38examples:
39 - | 39 - |
40 sid@1c23800 { 40 efuse@1c23800 {
41 compatible = "allwinner,sun4i-a10-sid"; 41 compatible = "allwinner,sun4i-a10-sid";
42 reg = <0x01c23800 0x10>; 42 reg = <0x01c23800 0x10>;
43 }; 43 };
44 44
45 - | 45 - |
46 sid@1c23800 { 46 efuse@1c23800 {
47 compatible = "allwinner,sun7i-a20-sid"; 47 compatible = "allwinner,sun7i-a20-sid";
48 reg = <0x01c23800 0x200>; 48 reg = <0x01c23800 0x200>;
49 }; 49 };
diff --git a/Documentation/devicetree/bindings/nvmem/nvmem-consumer.yaml b/Documentation/devicetree/bindings/nvmem/nvmem-consumer.yaml
new file mode 100644
index 000000000000..b7c00ed31085
--- /dev/null
+++ b/Documentation/devicetree/bindings/nvmem/nvmem-consumer.yaml
@@ -0,0 +1,45 @@
1# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
2%YAML 1.2
3---
4$id: http://devicetree.org/schemas/nvmem/nvmem-consumer.yaml#
5$schema: http://devicetree.org/meta-schemas/core.yaml#
6
7title: NVMEM (Non Volatile Memory) Consumer Device Tree Bindings
8
9maintainers:
10 - Srinivas Kandagatla <srinivas.kandagatla@linaro.org>
11
12select: true
13
14properties:
15 nvmem:
16 $ref: /schemas/types.yaml#/definitions/phandle-array
17 description:
18 List of phandle to the nvmem providers.
19
20 nvmem-cells:
21 $ref: /schemas/types.yaml#/definitions/phandle-array
22 description:
23 List of phandle to the nvmem data cells.
24
25 nvmem-names:
26 $ref: /schemas/types.yaml#/definitions/string-array
27 description:
28 Names for the each nvmem provider.
29
30 nvmem-cell-names:
31 $ref: /schemas/types.yaml#/definitions/string-array
32 description:
33 Names for each nvmem-cells specified.
34
35dependencies:
36 nvmem-names: [ nvmem ]
37 nvmem-cell-names: [ nvmem-cells ]
38
39examples:
40 - |
41 tsens {
42 /* ... */
43 nvmem-cells = <&tsens_calibration>;
44 nvmem-cell-names = "calibration";
45 };
diff --git a/Documentation/devicetree/bindings/nvmem/nvmem.txt b/Documentation/devicetree/bindings/nvmem/nvmem.txt
index fd06c09b822b..46a7ef485e24 100644
--- a/Documentation/devicetree/bindings/nvmem/nvmem.txt
+++ b/Documentation/devicetree/bindings/nvmem/nvmem.txt
@@ -1,80 +1 @@
1= NVMEM(Non Volatile Memory) Data Device Tree Bindings = This file has been moved to nvmem.yaml and nvmem-consumer.yaml.
2
3This binding is intended to represent the location of hardware
4configuration data stored in NVMEMs like eeprom, efuses and so on.
5
6On a significant proportion of boards, the manufacturer has stored
7some data on NVMEM, for the OS to be able to retrieve these information
8and act upon it. Obviously, the OS has to know about where to retrieve
9these data from, and where they are stored on the storage device.
10
11This document is here to document this.
12
13= Data providers =
14Contains bindings specific to provider drivers and data cells as children
15of this node.
16
17Optional properties:
18 read-only: Mark the provider as read only.
19
20= Data cells =
21These are the child nodes of the provider which contain data cell
22information like offset and size in nvmem provider.
23
24Required properties:
25reg: specifies the offset in byte within the storage device.
26
27Optional properties:
28
29bits: Is pair of bit location and number of bits, which specifies offset
30 in bit and number of bits within the address range specified by reg property.
31 Offset takes values from 0-7.
32
33For example:
34
35 /* Provider */
36 qfprom: qfprom@700000 {
37 ...
38
39 /* Data cells */
40 tsens_calibration: calib@404 {
41 reg = <0x404 0x10>;
42 };
43
44 tsens_calibration_bckp: calib_bckp@504 {
45 reg = <0x504 0x11>;
46 bits = <6 128>
47 };
48
49 pvs_version: pvs-version@6 {
50 reg = <0x6 0x2>
51 bits = <7 2>
52 };
53
54 speed_bin: speed-bin@c{
55 reg = <0xc 0x1>;
56 bits = <2 3>;
57
58 };
59 ...
60 };
61
62= Data consumers =
63Are device nodes which consume nvmem data cells/providers.
64
65Required-properties:
66nvmem-cells: list of phandle to the nvmem data cells.
67nvmem-cell-names: names for the each nvmem-cells specified. Required if
68 nvmem-cells is used.
69
70Optional-properties:
71nvmem : list of phandles to nvmem providers.
72nvmem-names: names for the each nvmem provider. required if nvmem is used.
73
74For example:
75
76 tsens {
77 ...
78 nvmem-cells = <&tsens_calibration>;
79 nvmem-cell-names = "calibration";
80 };
diff --git a/Documentation/devicetree/bindings/nvmem/nvmem.yaml b/Documentation/devicetree/bindings/nvmem/nvmem.yaml
new file mode 100644
index 000000000000..1c75a059206c
--- /dev/null
+++ b/Documentation/devicetree/bindings/nvmem/nvmem.yaml
@@ -0,0 +1,93 @@
1# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
2%YAML 1.2
3---
4$id: http://devicetree.org/schemas/nvmem/nvmem.yaml#
5$schema: http://devicetree.org/meta-schemas/core.yaml#
6
7title: NVMEM (Non Volatile Memory) Device Tree Bindings
8
9maintainers:
10 - Srinivas Kandagatla <srinivas.kandagatla@linaro.org>
11
12description: |
13 This binding is intended to represent the location of hardware
14 configuration data stored in NVMEMs like eeprom, efuses and so on.
15
16 On a significant proportion of boards, the manufacturer has stored
17 some data on NVMEM, for the OS to be able to retrieve these
18 information and act upon it. Obviously, the OS has to know about
19 where to retrieve these data from, and where they are stored on the
20 storage device.
21
22properties:
23 $nodename:
24 pattern: "^(eeprom|efuse|nvram)(@.*|-[0-9a-f])*$"
25
26 "#address-cells":
27 const: 1
28
29 "#size-cells":
30 const: 1
31
32 read-only:
33 $ref: /schemas/types.yaml#/definitions/flag
34 description:
35 Mark the provider as read only.
36
37patternProperties:
38 "^.*@[0-9a-f]+$":
39 type: object
40
41 properties:
42 reg:
43 maxItems: 1
44 description:
45 Offset and size in bytes within the storage device.
46
47 bits:
48 maxItems: 1
49 items:
50 items:
51 - minimum: 0
52 maximum: 7
53 description:
54 Offset in bit within the address range specified by reg.
55 - minimum: 1
56 description:
57 Size in bit within the address range specified by reg.
58
59 required:
60 - reg
61
62 additionalProperties: false
63
64examples:
65 - |
66 qfprom: eeprom@700000 {
67 #address-cells = <1>;
68 #size-cells = <1>;
69
70 /* ... */
71
72 /* Data cells */
73 tsens_calibration: calib@404 {
74 reg = <0x404 0x10>;
75 };
76
77 tsens_calibration_bckp: calib_bckp@504 {
78 reg = <0x504 0x11>;
79 bits = <6 128>;
80 };
81
82 pvs_version: pvs-version@6 {
83 reg = <0x6 0x2>;
84 bits = <7 2>;
85 };
86
87 speed_bin: speed-bin@c{
88 reg = <0xc 0x1>;
89 bits = <2 3>;
90 };
91 };
92
93...
diff --git a/Documentation/devicetree/bindings/phy/allwinner,sun6i-a31-mipi-dphy.yaml b/Documentation/devicetree/bindings/phy/allwinner,sun6i-a31-mipi-dphy.yaml
index 250f9d5aabdf..fa46670de299 100644
--- a/Documentation/devicetree/bindings/phy/allwinner,sun6i-a31-mipi-dphy.yaml
+++ b/Documentation/devicetree/bindings/phy/allwinner,sun6i-a31-mipi-dphy.yaml
@@ -1,7 +1,7 @@
1# SPDX-License-Identifier: GPL-2.0 1# SPDX-License-Identifier: GPL-2.0
2%YAML 1.2 2%YAML 1.2
3--- 3---
4$id: http://devicetree.org/schemas/display/allwinner,sun6i-a31-mipi-dphy.yaml# 4$id: http://devicetree.org/schemas/phy/allwinner,sun6i-a31-mipi-dphy.yaml#
5$schema: http://devicetree.org/meta-schemas/core.yaml# 5$schema: http://devicetree.org/meta-schemas/core.yaml#
6 6
7title: Allwinner A31 MIPI D-PHY Controller Device Tree Bindings 7title: Allwinner A31 MIPI D-PHY Controller Device Tree Bindings
diff --git a/Documentation/devicetree/bindings/pinctrl/st,stm32-pinctrl.yaml b/Documentation/devicetree/bindings/pinctrl/st,stm32-pinctrl.yaml
index 91d3e78b3395..400df2da018a 100644
--- a/Documentation/devicetree/bindings/pinctrl/st,stm32-pinctrl.yaml
+++ b/Documentation/devicetree/bindings/pinctrl/st,stm32-pinctrl.yaml
@@ -37,7 +37,8 @@ properties:
37 hwlocks: true 37 hwlocks: true
38 38
39 st,syscfg: 39 st,syscfg:
40 $ref: "/schemas/types.yaml#/definitions/phandle-array" 40 allOf:
41 - $ref: "/schemas/types.yaml#/definitions/phandle-array"
41 description: Should be phandle/offset/mask 42 description: Should be phandle/offset/mask
42 items: 43 items:
43 - description: Phandle to the syscon node which includes IRQ mux selection. 44 - description: Phandle to the syscon node which includes IRQ mux selection.
diff --git a/Documentation/devicetree/bindings/riscv/cpus.txt b/Documentation/devicetree/bindings/riscv/cpus.txt
deleted file mode 100644
index adf7b7af5dc3..000000000000
--- a/Documentation/devicetree/bindings/riscv/cpus.txt
+++ /dev/null
@@ -1,162 +0,0 @@
1===================
2RISC-V CPU Bindings
3===================
4
5The device tree allows to describe the layout of CPUs in a system through
6the "cpus" node, which in turn contains a number of subnodes (ie "cpu")
7defining properties for every cpu.
8
9Bindings for CPU nodes follow the Devicetree Specification, available from:
10
11https://www.devicetree.org/specifications/
12
13with updates for 32-bit and 64-bit RISC-V systems provided in this document.
14
15===========
16Terminology
17===========
18
19This document uses some terminology common to the RISC-V community that is not
20widely used, the definitions of which are listed here:
21
22* hart: A hardware execution context, which contains all the state mandated by
23 the RISC-V ISA: a PC and some registers. This terminology is designed to
24 disambiguate software's view of execution contexts from any particular
25 microarchitectural implementation strategy. For example, my Intel laptop is
26 described as having one socket with two cores, each of which has two hyper
27 threads. Therefore this system has four harts.
28
29=====================================
30cpus and cpu node bindings definition
31=====================================
32
33The RISC-V architecture, in accordance with the Devicetree Specification,
34requires the cpus and cpu nodes to be present and contain the properties
35described below.
36
37- cpus node
38
39 Description: Container of cpu nodes
40
41 The node name must be "cpus".
42
43 A cpus node must define the following properties:
44
45 - #address-cells
46 Usage: required
47 Value type: <u32>
48 Definition: must be set to 1
49 - #size-cells
50 Usage: required
51 Value type: <u32>
52 Definition: must be set to 0
53
54- cpu node
55
56 Description: Describes a hart context
57
58 PROPERTIES
59
60 - device_type
61 Usage: required
62 Value type: <string>
63 Definition: must be "cpu"
64 - reg
65 Usage: required
66 Value type: <u32>
67 Definition: The hart ID of this CPU node
68 - compatible:
69 Usage: required
70 Value type: <stringlist>
71 Definition: must contain "riscv", may contain one of
72 "sifive,rocket0"
73 - mmu-type:
74 Usage: optional
75 Value type: <string>
76 Definition: Specifies the CPU's MMU type. Possible values are
77 "riscv,sv32"
78 "riscv,sv39"
79 "riscv,sv48"
80 - riscv,isa:
81 Usage: required
82 Value type: <string>
83 Definition: Contains the RISC-V ISA string of this hart. These
84 ISA strings are defined by the RISC-V ISA manual.
85
86Example: SiFive Freedom U540G Development Kit
87---------------------------------------------
88
89This system contains two harts: a hart marked as disabled that's used for
90low-level system tasks and should be ignored by Linux, and a second hart that
91Linux is allowed to run on.
92
93 cpus {
94 #address-cells = <1>;
95 #size-cells = <0>;
96 timebase-frequency = <1000000>;
97 cpu@0 {
98 clock-frequency = <1600000000>;
99 compatible = "sifive,rocket0", "riscv";
100 device_type = "cpu";
101 i-cache-block-size = <64>;
102 i-cache-sets = <128>;
103 i-cache-size = <16384>;
104 next-level-cache = <&L15 &L0>;
105 reg = <0>;
106 riscv,isa = "rv64imac";
107 status = "disabled";
108 L10: interrupt-controller {
109 #interrupt-cells = <1>;
110 compatible = "riscv,cpu-intc";
111 interrupt-controller;
112 };
113 };
114 cpu@1 {
115 clock-frequency = <1600000000>;
116 compatible = "sifive,rocket0", "riscv";
117 d-cache-block-size = <64>;
118 d-cache-sets = <64>;
119 d-cache-size = <32768>;
120 d-tlb-sets = <1>;
121 d-tlb-size = <32>;
122 device_type = "cpu";
123 i-cache-block-size = <64>;
124 i-cache-sets = <64>;
125 i-cache-size = <32768>;
126 i-tlb-sets = <1>;
127 i-tlb-size = <32>;
128 mmu-type = "riscv,sv39";
129 next-level-cache = <&L15 &L0>;
130 reg = <1>;
131 riscv,isa = "rv64imafdc";
132 status = "okay";
133 tlb-split;
134 L13: interrupt-controller {
135 #interrupt-cells = <1>;
136 compatible = "riscv,cpu-intc";
137 interrupt-controller;
138 };
139 };
140 };
141
142Example: Spike ISA Simulator with 1 Hart
143----------------------------------------
144
145This device tree matches the Spike ISA golden model as run with `spike -p1`.
146
147 cpus {
148 cpu@0 {
149 device_type = "cpu";
150 reg = <0x00000000>;
151 status = "okay";
152 compatible = "riscv";
153 riscv,isa = "rv64imafdc";
154 mmu-type = "riscv,sv48";
155 clock-frequency = <0x3b9aca00>;
156 interrupt-controller {
157 #interrupt-cells = <0x00000001>;
158 interrupt-controller;
159 compatible = "riscv,cpu-intc";
160 }
161 }
162 }
diff --git a/Documentation/devicetree/bindings/riscv/cpus.yaml b/Documentation/devicetree/bindings/riscv/cpus.yaml
index c899111aa5e3..b261a3015f84 100644
--- a/Documentation/devicetree/bindings/riscv/cpus.yaml
+++ b/Documentation/devicetree/bindings/riscv/cpus.yaml
@@ -10,6 +10,18 @@ maintainers:
10 - Paul Walmsley <paul.walmsley@sifive.com> 10 - Paul Walmsley <paul.walmsley@sifive.com>
11 - Palmer Dabbelt <palmer@sifive.com> 11 - Palmer Dabbelt <palmer@sifive.com>
12 12
13description: |
14 This document uses some terminology common to the RISC-V community
15 that is not widely used, the definitions of which are listed here:
16
17 hart: A hardware execution context, which contains all the state
18 mandated by the RISC-V ISA: a PC and some registers. This
19 terminology is designed to disambiguate software's view of execution
20 contexts from any particular microarchitectural implementation
21 strategy. For example, an Intel laptop containing one socket with
22 two cores, each of which has two hyperthreads, could be described as
23 having four harts.
24
13properties: 25properties:
14 compatible: 26 compatible:
15 items: 27 items:
@@ -50,6 +62,10 @@ properties:
50 User-Level ISA document, available from 62 User-Level ISA document, available from
51 https://riscv.org/specifications/ 63 https://riscv.org/specifications/
52 64
65 While the isa strings in ISA specification are case
66 insensitive, letters in the riscv,isa string must be all
67 lowercase to simplify parsing.
68
53 timebase-frequency: 69 timebase-frequency:
54 type: integer 70 type: integer
55 minimum: 1 71 minimum: 1
diff --git a/Documentation/devicetree/bindings/riscv/sifive.yaml b/Documentation/devicetree/bindings/riscv/sifive.yaml
index 9d17dc2f3f84..3ab532713dc1 100644
--- a/Documentation/devicetree/bindings/riscv/sifive.yaml
+++ b/Documentation/devicetree/bindings/riscv/sifive.yaml
@@ -19,7 +19,7 @@ properties:
19 compatible: 19 compatible:
20 items: 20 items:
21 - enum: 21 - enum:
22 - sifive,freedom-unleashed-a00 22 - sifive,hifive-unleashed-a00
23 - const: sifive,fu540-c000 23 - const: sifive,fu540-c000
24 - const: sifive,fu540 24 - const: sifive,fu540
25... 25...
diff --git a/Documentation/devicetree/bindings/spi/spi-controller.yaml b/Documentation/devicetree/bindings/spi/spi-controller.yaml
index 876c0623f322..a02e2fe2bfb2 100644
--- a/Documentation/devicetree/bindings/spi/spi-controller.yaml
+++ b/Documentation/devicetree/bindings/spi/spi-controller.yaml
@@ -73,7 +73,6 @@ patternProperties:
73 Compatible of the SPI device. 73 Compatible of the SPI device.
74 74
75 reg: 75 reg:
76 maxItems: 1
77 minimum: 0 76 minimum: 0
78 maximum: 256 77 maximum: 256
79 description: 78 description:
diff --git a/Documentation/devicetree/bindings/timer/intel,ixp4xx-timer.yaml b/Documentation/devicetree/bindings/timer/intel,ixp4xx-timer.yaml
index a36a0746c056..2807225db902 100644
--- a/Documentation/devicetree/bindings/timer/intel,ixp4xx-timer.yaml
+++ b/Documentation/devicetree/bindings/timer/intel,ixp4xx-timer.yaml
@@ -2,7 +2,7 @@
2# Copyright 2018 Linaro Ltd. 2# Copyright 2018 Linaro Ltd.
3%YAML 1.2 3%YAML 1.2
4--- 4---
5$id: "http://devicetree.org/schemas/timer/intel-ixp4xx-timer.yaml#" 5$id: "http://devicetree.org/schemas/timer/intel,ixp4xx-timer.yaml#"
6$schema: "http://devicetree.org/meta-schemas/core.yaml#" 6$schema: "http://devicetree.org/meta-schemas/core.yaml#"
7 7
8title: Intel IXP4xx XScale Networking Processors Timers 8title: Intel IXP4xx XScale Networking Processors Timers
diff --git a/Documentation/devicetree/bindings/usb/usb251xb.txt b/Documentation/devicetree/bindings/usb/usb251xb.txt
index bc7945e9dbfe..17915f64b8ee 100644
--- a/Documentation/devicetree/bindings/usb/usb251xb.txt
+++ b/Documentation/devicetree/bindings/usb/usb251xb.txt
@@ -64,10 +64,8 @@ Optional properties :
64 - power-on-time-ms : Specifies the time it takes from the time the host 64 - power-on-time-ms : Specifies the time it takes from the time the host
65 initiates the power-on sequence to a port until the port has adequate 65 initiates the power-on sequence to a port until the port has adequate
66 power. The value is given in ms in a 0 - 510 range (default is 100ms). 66 power. The value is given in ms in a 0 - 510 range (default is 100ms).
67 - swap-dx-lanes : Specifies the downstream ports which will swap the 67 - swap-dx-lanes : Specifies the ports which will swap the differential-pair
68 differential-pair (D+/D-), default is not-swapped. 68 (D+/D-), default is not-swapped.
69 - swap-us-lanes : Selects the upstream port differential-pair (D+/D-)
70 swapping (boolean, default is not-swapped)
71 69
72Examples: 70Examples:
73 usb2512b@2c { 71 usb2512b@2c {
diff --git a/Documentation/doc-guide/conf.py b/Documentation/doc-guide/conf.py
deleted file mode 100644
index fd3731182d5a..000000000000
--- a/Documentation/doc-guide/conf.py
+++ /dev/null
@@ -1,10 +0,0 @@
1# -*- coding: utf-8; mode: python -*-
2
3project = 'Linux Kernel Documentation Guide'
4
5tags.add("subproject")
6
7latex_documents = [
8 ('index', 'kernel-doc-guide.tex', 'Linux Kernel Documentation Guide',
9 'The kernel development community', 'manual'),
10]
diff --git a/Documentation/driver-api/80211/conf.py b/Documentation/driver-api/80211/conf.py
deleted file mode 100644
index 4424b4b0b9c3..000000000000
--- a/Documentation/driver-api/80211/conf.py
+++ /dev/null
@@ -1,10 +0,0 @@
1# -*- coding: utf-8; mode: python -*-
2
3project = "Linux 802.11 Driver Developer's Guide"
4
5tags.add("subproject")
6
7latex_documents = [
8 ('index', '80211.tex', project,
9 'The kernel development community', 'manual'),
10]
diff --git a/Documentation/driver-api/conf.py b/Documentation/driver-api/conf.py
deleted file mode 100644
index 202726d20088..000000000000
--- a/Documentation/driver-api/conf.py
+++ /dev/null
@@ -1,10 +0,0 @@
1# -*- coding: utf-8; mode: python -*-
2
3project = "The Linux driver implementer's API guide"
4
5tags.add("subproject")
6
7latex_documents = [
8 ('index', 'driver-api.tex', project,
9 'The kernel development community', 'manual'),
10]
diff --git a/Documentation/driver-api/generic-counter.rst b/Documentation/driver-api/generic-counter.rst
index 0c161b1a3be6..8382f01a53e3 100644
--- a/Documentation/driver-api/generic-counter.rst
+++ b/Documentation/driver-api/generic-counter.rst
@@ -233,7 +233,7 @@ Userspace Interface
233Several sysfs attributes are generated by the Generic Counter interface, 233Several sysfs attributes are generated by the Generic Counter interface,
234and reside under the /sys/bus/counter/devices/counterX directory, where 234and reside under the /sys/bus/counter/devices/counterX directory, where
235counterX refers to the respective counter device. Please see 235counterX refers to the respective counter device. Please see
236Documentation/ABI/testing/sys-bus-counter-generic-sysfs for detailed 236Documentation/ABI/testing/sysfs-bus-counter for detailed
237information on each Generic Counter interface sysfs attribute. 237information on each Generic Counter interface sysfs attribute.
238 238
239Through these sysfs attributes, programs and scripts may interact with 239Through these sysfs attributes, programs and scripts may interact with
@@ -325,7 +325,7 @@ sysfs attributes, where Y is the unique ID of the respective Count:
325 325
326For a more detailed breakdown of the available Generic Counter interface 326For a more detailed breakdown of the available Generic Counter interface
327sysfs attributes, please refer to the 327sysfs attributes, please refer to the
328Documentation/ABI/testing/sys-bus-counter file. 328Documentation/ABI/testing/sysfs-bus-counter file.
329 329
330The Signals and Counts associated with the Counter device are registered 330The Signals and Counts associated with the Counter device are registered
331to the system as well by the counter_register function. The 331to the system as well by the counter_register function. The
diff --git a/Documentation/driver-api/phy/phy.rst b/Documentation/driver-api/phy/phy.rst
index 457c3e0f86d6..8fc1ce0bb905 100644
--- a/Documentation/driver-api/phy/phy.rst
+++ b/Documentation/driver-api/phy/phy.rst
@@ -179,8 +179,8 @@ PHY Mappings
179 179
180In order to get reference to a PHY without help from DeviceTree, the framework 180In order to get reference to a PHY without help from DeviceTree, the framework
181offers lookups which can be compared to clkdev that allow clk structures to be 181offers lookups which can be compared to clkdev that allow clk structures to be
182bound to devices. A lookup can be made be made during runtime when a handle to 182bound to devices. A lookup can be made during runtime when a handle to the
183the struct phy already exists. 183struct phy already exists.
184 184
185The framework offers the following API for registering and unregistering the 185The framework offers the following API for registering and unregistering the
186lookups:: 186lookups::
diff --git a/Documentation/driver-api/pm/conf.py b/Documentation/driver-api/pm/conf.py
deleted file mode 100644
index a89fac11272f..000000000000
--- a/Documentation/driver-api/pm/conf.py
+++ /dev/null
@@ -1,10 +0,0 @@
1# -*- coding: utf-8; mode: python -*-
2
3project = "Device Power Management"
4
5tags.add("subproject")
6
7latex_documents = [
8 ('index', 'pm.tex', project,
9 'The kernel development community', 'manual'),
10]
diff --git a/Documentation/filesystems/cifs/TODO b/Documentation/filesystems/cifs/TODO
index 9267f3fb131f..edbbccda1942 100644
--- a/Documentation/filesystems/cifs/TODO
+++ b/Documentation/filesystems/cifs/TODO
@@ -13,7 +13,8 @@ a) SMB3 (and SMB3.1.1) missing optional features:
13 - T10 copy offload ie "ODX" (copy chunk, and "Duplicate Extents" ioctl 13 - T10 copy offload ie "ODX" (copy chunk, and "Duplicate Extents" ioctl
14 currently the only two server side copy mechanisms supported) 14 currently the only two server side copy mechanisms supported)
15 15
16b) improved sparse file support 16b) improved sparse file support (fiemap and SEEK_HOLE are implemented
17but additional features would be supportable by the protocol).
17 18
18c) Directory entry caching relies on a 1 second timer, rather than 19c) Directory entry caching relies on a 1 second timer, rather than
19using Directory Leases, currently only the root file handle is cached longer 20using Directory Leases, currently only the root file handle is cached longer
@@ -21,9 +22,13 @@ using Directory Leases, currently only the root file handle is cached longer
21d) quota support (needs minor kernel change since quota calls 22d) quota support (needs minor kernel change since quota calls
22to make it to network filesystems or deviceless filesystems) 23to make it to network filesystems or deviceless filesystems)
23 24
24e) Additional use cases where we use "compoounding" (e.g. open/query/close 25e) Additional use cases can be optimized to use "compounding"
25and open/setinfo/close) to reduce the number of roundtrips, and also 26(e.g. open/query/close and open/setinfo/close) to reduce the number
26open to reduce redundant opens (using deferred close and reference counts more). 27of roundtrips to the server and improve performance. Various cases
28(stat, statfs, create, unlink, mkdir) already have been improved by
29using compounding but more can be done. In addition we could significantly
30reduce redundant opens by using deferred close (with handle caching leases)
31and better using reference counters on file handles.
27 32
28f) Finish inotify support so kde and gnome file list windows 33f) Finish inotify support so kde and gnome file list windows
29will autorefresh (partially complete by Asser). Needs minor kernel 34will autorefresh (partially complete by Asser). Needs minor kernel
@@ -43,18 +48,17 @@ mount or a per server basis to client UIDs or nobody if no mapping
43exists. Also better integration with winbind for resolving SID owners 48exists. Also better integration with winbind for resolving SID owners
44 49
45k) Add tools to take advantage of more smb3 specific ioctls and features 50k) Add tools to take advantage of more smb3 specific ioctls and features
46(passthrough ioctl/fsctl for sending various SMB3 fsctls to the server 51(passthrough ioctl/fsctl is now implemented in cifs.ko to allow sending
47is in progress, and a passthrough query_info call is already implemented 52various SMB3 fsctls and query info and set info calls directly from user space)
48in cifs.ko to allow smb3 info levels queries to be sent from userspace) 53Add tools to make setting various non-POSIX metadata attributes easier
54from tools (e.g. extending what was done in smb-info tool).
49 55
50l) encrypted file support 56l) encrypted file support
51 57
52m) improved stats gathering tools (perhaps integration with nfsometer?) 58m) improved stats gathering tools (perhaps integration with nfsometer?)
53to extend and make easier to use what is currently in /proc/fs/cifs/Stats 59to extend and make easier to use what is currently in /proc/fs/cifs/Stats
54 60
55n) allow setting more NTFS/SMB3 file attributes remotely (currently limited to compressed 61n) Add support for claims based ACLs ("DAC")
56file attribute via chflags) and improve user space tools for managing and
57viewing them.
58 62
59o) mount helper GUI (to simplify the various configuration options on mount) 63o) mount helper GUI (to simplify the various configuration options on mount)
60 64
@@ -82,6 +86,8 @@ so far).
82w) Add support for additional strong encryption types, and additional spnego 86w) Add support for additional strong encryption types, and additional spnego
83authentication mechanisms (see MS-SMB2) 87authentication mechanisms (see MS-SMB2)
84 88
89x) Finish support for SMB3.1.1 compression
90
85KNOWN BUGS 91KNOWN BUGS
86==================================== 92====================================
87See http://bugzilla.samba.org - search on product "CifsVFS" for 93See http://bugzilla.samba.org - search on product "CifsVFS" for
diff --git a/Documentation/filesystems/conf.py b/Documentation/filesystems/conf.py
deleted file mode 100644
index ea44172af5c4..000000000000
--- a/Documentation/filesystems/conf.py
+++ /dev/null
@@ -1,10 +0,0 @@
1# -*- coding: utf-8; mode: python -*-
2
3project = "Linux Filesystems API"
4
5tags.add("subproject")
6
7latex_documents = [
8 ('index', 'filesystems.tex', project,
9 'The kernel development community', 'manual'),
10]
diff --git a/Documentation/gpu/conf.py b/Documentation/gpu/conf.py
deleted file mode 100644
index 1757b040fb32..000000000000
--- a/Documentation/gpu/conf.py
+++ /dev/null
@@ -1,10 +0,0 @@
1# -*- coding: utf-8; mode: python -*-
2
3project = "Linux GPU Driver Developer's Guide"
4
5tags.add("subproject")
6
7latex_documents = [
8 ('index', 'gpu.tex', project,
9 'The kernel development community', 'manual'),
10]
diff --git a/Documentation/hwmon/k8temp.rst b/Documentation/hwmon/k8temp.rst
index 72da12aa17e5..fe9109521056 100644
--- a/Documentation/hwmon/k8temp.rst
+++ b/Documentation/hwmon/k8temp.rst
@@ -9,7 +9,7 @@ Supported chips:
9 9
10 Addresses scanned: PCI space 10 Addresses scanned: PCI space
11 11
12 Datasheet: http://support.amd.com/us/Processor_TechDocs/32559.pdf 12 Datasheet: http://www.amd.com/system/files/TechDocs/32559.pdf
13 13
14Author: Rudolf Marek 14Author: Rudolf Marek
15 15
diff --git a/Documentation/index.rst b/Documentation/index.rst
index 70ae148ec980..2df5a3da563c 100644
--- a/Documentation/index.rst
+++ b/Documentation/index.rst
@@ -111,9 +111,11 @@ needed).
111 netlabel/index 111 netlabel/index
112 networking/index 112 networking/index
113 pcmcia/index 113 pcmcia/index
114 power/index
114 target/index 115 target/index
115 timers/index 116 timers/index
116 watchdog/index 117 watchdog/index
118 virtual/index
117 input/index 119 input/index
118 hwmon/index 120 hwmon/index
119 gpu/index 121 gpu/index
@@ -143,6 +145,7 @@ implementation.
143 arm64/index 145 arm64/index
144 ia64/index 146 ia64/index
145 m68k/index 147 m68k/index
148 powerpc/index
146 riscv/index 149 riscv/index
147 s390/index 150 s390/index
148 sh/index 151 sh/index
diff --git a/Documentation/input/conf.py b/Documentation/input/conf.py
deleted file mode 100644
index d2352fdc92ed..000000000000
--- a/Documentation/input/conf.py
+++ /dev/null
@@ -1,10 +0,0 @@
1# -*- coding: utf-8; mode: python -*-
2
3project = "The Linux input driver subsystem"
4
5tags.add("subproject")
6
7latex_documents = [
8 ('index', 'linux-input.tex', project,
9 'The kernel development community', 'manual'),
10]
diff --git a/Documentation/kernel-hacking/conf.py b/Documentation/kernel-hacking/conf.py
deleted file mode 100644
index 3d8acf0f33ad..000000000000
--- a/Documentation/kernel-hacking/conf.py
+++ /dev/null
@@ -1,10 +0,0 @@
1# -*- coding: utf-8; mode: python -*-
2
3project = "Kernel Hacking Guides"
4
5tags.add("subproject")
6
7latex_documents = [
8 ('index', 'kernel-hacking.tex', project,
9 'The kernel development community', 'manual'),
10]
diff --git a/Documentation/locking/spinlocks.rst b/Documentation/locking/spinlocks.rst
index 098107fb7d86..e93ec6645238 100644
--- a/Documentation/locking/spinlocks.rst
+++ b/Documentation/locking/spinlocks.rst
@@ -82,7 +82,7 @@ itself. The read lock allows many concurrent readers. Anything that
82**changes** the list will have to get the write lock. 82**changes** the list will have to get the write lock.
83 83
84 NOTE! RCU is better for list traversal, but requires careful 84 NOTE! RCU is better for list traversal, but requires careful
85 attention to design detail (see Documentation/RCU/listRCU.txt). 85 attention to design detail (see Documentation/RCU/listRCU.rst).
86 86
87Also, you cannot "upgrade" a read-lock to a write-lock, so if you at _any_ 87Also, you cannot "upgrade" a read-lock to a write-lock, so if you at _any_
88time need to do any changes (even if you don't do it every time), you have 88time need to do any changes (even if you don't do it every time), you have
@@ -90,7 +90,7 @@ to get the write-lock at the very beginning.
90 90
91 NOTE! We are working hard to remove reader-writer spinlocks in most 91 NOTE! We are working hard to remove reader-writer spinlocks in most
92 cases, so please don't add a new one without consensus. (Instead, see 92 cases, so please don't add a new one without consensus. (Instead, see
93 Documentation/RCU/rcu.txt for complete information.) 93 Documentation/RCU/rcu.rst for complete information.)
94 94
95---- 95----
96 96
diff --git a/Documentation/maintainer/conf.py b/Documentation/maintainer/conf.py
deleted file mode 100644
index 81e9eb7a7884..000000000000
--- a/Documentation/maintainer/conf.py
+++ /dev/null
@@ -1,10 +0,0 @@
1# -*- coding: utf-8; mode: python -*-
2
3project = 'Linux Kernel Development Documentation'
4
5tags.add("subproject")
6
7latex_documents = [
8 ('index', 'maintainer.tex', 'Linux Kernel Development Documentation',
9 'The kernel development community', 'manual'),
10]
diff --git a/Documentation/media/conf.py b/Documentation/media/conf.py
deleted file mode 100644
index 1f194fcd2cae..000000000000
--- a/Documentation/media/conf.py
+++ /dev/null
@@ -1,12 +0,0 @@
1# -*- coding: utf-8; mode: python -*-
2
3# SPDX-License-Identifier: GPL-2.0
4
5project = 'Linux Media Subsystem Documentation'
6
7tags.add("subproject")
8
9latex_documents = [
10 ('index', 'media.tex', 'Linux Media Subsystem Documentation',
11 'The kernel development community', 'manual'),
12]
diff --git a/Documentation/memory-barriers.txt b/Documentation/memory-barriers.txt
index 045bb8148fe9..1adbb8a371c7 100644
--- a/Documentation/memory-barriers.txt
+++ b/Documentation/memory-barriers.txt
@@ -548,7 +548,7 @@ There are certain things that the Linux kernel memory barriers do not guarantee:
548 548
549 [*] For information on bus mastering DMA and coherency please read: 549 [*] For information on bus mastering DMA and coherency please read:
550 550
551 Documentation/PCI/pci.rst 551 Documentation/driver-api/pci/pci.rst
552 Documentation/DMA-API-HOWTO.txt 552 Documentation/DMA-API-HOWTO.txt
553 Documentation/DMA-API.txt 553 Documentation/DMA-API.txt
554 554
diff --git a/Documentation/networking/conf.py b/Documentation/networking/conf.py
deleted file mode 100644
index 40f69e67a883..000000000000
--- a/Documentation/networking/conf.py
+++ /dev/null
@@ -1,10 +0,0 @@
1# -*- coding: utf-8; mode: python -*-
2
3project = "Linux Networking Documentation"
4
5tags.add("subproject")
6
7latex_documents = [
8 ('index', 'networking.tex', project,
9 'The kernel development community', 'manual'),
10]
diff --git a/Documentation/networking/tls-offload.rst b/Documentation/networking/tls-offload.rst
index 048e5ca44824..0dd3f748239f 100644
--- a/Documentation/networking/tls-offload.rst
+++ b/Documentation/networking/tls-offload.rst
@@ -424,13 +424,24 @@ Statistics
424Following minimum set of TLS-related statistics should be reported 424Following minimum set of TLS-related statistics should be reported
425by the driver: 425by the driver:
426 426
427 * ``rx_tls_decrypted`` - number of successfully decrypted TLS segments 427 * ``rx_tls_decrypted_packets`` - number of successfully decrypted RX packets
428 * ``tx_tls_encrypted`` - number of in-order TLS segments passed to device 428 which were part of a TLS stream.
429 for encryption 429 * ``rx_tls_decrypted_bytes`` - number of TLS payload bytes in RX packets
430 which were successfully decrypted.
431 * ``tx_tls_encrypted_packets`` - number of TX packets passed to the device
432 for encryption of their TLS payload.
433 * ``tx_tls_encrypted_bytes`` - number of TLS payload bytes in TX packets
434 passed to the device for encryption.
435 * ``tx_tls_ctx`` - number of TLS TX HW offload contexts added to device for
436 encryption.
430 * ``tx_tls_ooo`` - number of TX packets which were part of a TLS stream 437 * ``tx_tls_ooo`` - number of TX packets which were part of a TLS stream
431 but did not arrive in the expected order 438 but did not arrive in the expected order.
432 * ``tx_tls_drop_no_sync_data`` - number of TX packets dropped because 439 * ``tx_tls_drop_no_sync_data`` - number of TX packets which were part of
433 they arrived out of order and associated record could not be found 440 a TLS stream dropped, because they arrived out of order and associated
441 record could not be found.
442 * ``tx_tls_drop_bypass_req`` - number of TX packets which were part of a TLS
443 stream dropped, because they contain both data that has been encrypted by
444 software and data that expects hardware crypto offload.
434 445
435Notable corner cases, exceptions and additional requirements 446Notable corner cases, exceptions and additional requirements
436============================================================ 447============================================================
@@ -495,21 +506,3 @@ Drivers should ignore the changes to TLS the device feature flags.
495These flags will be acted upon accordingly by the core ``ktls`` code. 506These flags will be acted upon accordingly by the core ``ktls`` code.
496TLS device feature flags only control adding of new TLS connection 507TLS device feature flags only control adding of new TLS connection
497offloads, old connections will remain active after flags are cleared. 508offloads, old connections will remain active after flags are cleared.
498
499Known bugs
500==========
501
502skb_orphan() leaks clear text
503-----------------------------
504
505Currently drivers depend on the :c:member:`sk` member of
506:c:type:`struct sk_buff <sk_buff>` to identify segments requiring
507encryption. Any operation which removes or does not preserve the socket
508association such as :c:func:`skb_orphan` or :c:func:`skb_clone`
509will cause the driver to miss the packets and lead to clear text leaks.
510
511Redirects leak clear text
512-------------------------
513
514In the RX direction, if segment has already been decrypted by the device
515and it gets redirected or mirrored - clear text will be transmitted out.
diff --git a/Documentation/networking/tuntap.txt b/Documentation/networking/tuntap.txt
index 949d5dcdd9a3..0104830d5075 100644
--- a/Documentation/networking/tuntap.txt
+++ b/Documentation/networking/tuntap.txt
@@ -204,8 +204,8 @@ Ethernet device, which instead of receiving packets from a physical
204media, receives them from user space program and instead of sending 204media, receives them from user space program and instead of sending
205packets via physical media sends them to the user space program. 205packets via physical media sends them to the user space program.
206 206
207Let's say that you configured IPX on the tap0, then whenever 207Let's say that you configured IPv6 on the tap0, then whenever
208the kernel sends an IPX packet to tap0, it is passed to the application 208the kernel sends an IPv6 packet to tap0, it is passed to the application
209(VTun for example). The application encrypts, compresses and sends it to 209(VTun for example). The application encrypts, compresses and sends it to
210the other side over TCP or UDP. The application on the other side decompresses 210the other side over TCP or UDP. The application on the other side decompresses
211and decrypts the data received and writes the packet to the TAP device, 211and decrypts the data received and writes the packet to the TAP device,
diff --git a/Documentation/power/index.rst b/Documentation/power/index.rst
index 20415f21e48a..002e42745263 100644
--- a/Documentation/power/index.rst
+++ b/Documentation/power/index.rst
@@ -1,4 +1,4 @@
1:orphan: 1.. SPDX-License-Identifier: GPL-2.0
2 2
3================ 3================
4Power Management 4Power Management
diff --git a/Documentation/powerpc/bootwrapper.txt b/Documentation/powerpc/bootwrapper.rst
index d60fced5e1cc..a6292afba573 100644
--- a/Documentation/powerpc/bootwrapper.txt
+++ b/Documentation/powerpc/bootwrapper.rst
@@ -1,5 +1,7 @@
1========================
1The PowerPC boot wrapper 2The PowerPC boot wrapper
2------------------------ 3========================
4
3Copyright (C) Secret Lab Technologies Ltd. 5Copyright (C) Secret Lab Technologies Ltd.
4 6
5PowerPC image targets compresses and wraps the kernel image (vmlinux) with 7PowerPC image targets compresses and wraps the kernel image (vmlinux) with
@@ -21,6 +23,7 @@ it uses the wrapper script (arch/powerpc/boot/wrapper) to generate target
21image. The details of the build system is discussed in the next section. 23image. The details of the build system is discussed in the next section.
22Currently, the following image format targets exist: 24Currently, the following image format targets exist:
23 25
26 ==================== ========================================================
24 cuImage.%: Backwards compatible uImage for older version of 27 cuImage.%: Backwards compatible uImage for older version of
25 U-Boot (for versions that don't understand the device 28 U-Boot (for versions that don't understand the device
26 tree). This image embeds a device tree blob inside 29 tree). This image embeds a device tree blob inside
@@ -29,31 +32,36 @@ Currently, the following image format targets exist:
29 with boot wrapper code that extracts data from the old 32 with boot wrapper code that extracts data from the old
30 bd_info structure and loads the data into the device 33 bd_info structure and loads the data into the device
31 tree before jumping into the kernel. 34 tree before jumping into the kernel.
32 Because of the series of #ifdefs found in the 35
36 Because of the series of #ifdefs found in the
33 bd_info structure used in the old U-Boot interfaces, 37 bd_info structure used in the old U-Boot interfaces,
34 cuImages are platform specific. Each specific 38 cuImages are platform specific. Each specific
35 U-Boot platform has a different platform init file 39 U-Boot platform has a different platform init file
36 which populates the embedded device tree with data 40 which populates the embedded device tree with data
37 from the platform specific bd_info file. The platform 41 from the platform specific bd_info file. The platform
38 specific cuImage platform init code can be found in 42 specific cuImage platform init code can be found in
39 arch/powerpc/boot/cuboot.*.c. Selection of the correct 43 `arch/powerpc/boot/cuboot.*.c`. Selection of the correct
40 cuImage init code for a specific board can be found in 44 cuImage init code for a specific board can be found in
41 the wrapper structure. 45 the wrapper structure.
46
42 dtbImage.%: Similar to zImage, except device tree blob is embedded 47 dtbImage.%: Similar to zImage, except device tree blob is embedded
43 inside the image instead of provided by firmware. The 48 inside the image instead of provided by firmware. The
44 output image file can be either an elf file or a flat 49 output image file can be either an elf file or a flat
45 binary depending on the platform. 50 binary depending on the platform.
46 dtbImages are used on systems which do not have an 51
52 dtbImages are used on systems which do not have an
47 interface for passing a device tree directly. 53 interface for passing a device tree directly.
48 dtbImages are similar to simpleImages except that 54 dtbImages are similar to simpleImages except that
49 dtbImages have platform specific code for extracting 55 dtbImages have platform specific code for extracting
50 data from the board firmware, but simpleImages do not 56 data from the board firmware, but simpleImages do not
51 talk to the firmware at all. 57 talk to the firmware at all.
52 PlayStation 3 support uses dtbImage. So do Embedded 58
59 PlayStation 3 support uses dtbImage. So do Embedded
53 Planet boards using the PlanetCore firmware. Board 60 Planet boards using the PlanetCore firmware. Board
54 specific initialization code is typically found in a 61 specific initialization code is typically found in a
55 file named arch/powerpc/boot/<platform>.c; but this 62 file named arch/powerpc/boot/<platform>.c; but this
56 can be overridden by the wrapper script. 63 can be overridden by the wrapper script.
64
57 simpleImage.%: Firmware independent compressed image that does not 65 simpleImage.%: Firmware independent compressed image that does not
58 depend on any particular firmware interface and embeds 66 depend on any particular firmware interface and embeds
59 a device tree blob. This image is a flat binary that 67 a device tree blob. This image is a flat binary that
@@ -61,14 +69,16 @@ Currently, the following image format targets exist:
61 Firmware cannot pass any configuration data to the 69 Firmware cannot pass any configuration data to the
62 kernel with this image type and it depends entirely on 70 kernel with this image type and it depends entirely on
63 the embedded device tree for all information. 71 the embedded device tree for all information.
64 The simpleImage is useful for booting systems with 72
73 The simpleImage is useful for booting systems with
65 an unknown firmware interface or for booting from 74 an unknown firmware interface or for booting from
66 a debugger when no firmware is present (such as on 75 a debugger when no firmware is present (such as on
67 the Xilinx Virtex platform). The only assumption that 76 the Xilinx Virtex platform). The only assumption that
68 simpleImage makes is that RAM is correctly initialized 77 simpleImage makes is that RAM is correctly initialized
69 and that the MMU is either off or has RAM mapped to 78 and that the MMU is either off or has RAM mapped to
70 base address 0. 79 base address 0.
71 simpleImage also supports inserting special platform 80
81 simpleImage also supports inserting special platform
72 specific initialization code to the start of the bootup 82 specific initialization code to the start of the bootup
73 sequence. The virtex405 platform uses this feature to 83 sequence. The virtex405 platform uses this feature to
74 ensure that the cache is invalidated before caching 84 ensure that the cache is invalidated before caching
@@ -81,9 +91,11 @@ Currently, the following image format targets exist:
81 named (virtex405-<board>.dts). Search the wrapper 91 named (virtex405-<board>.dts). Search the wrapper
82 script for 'virtex405' and see the file 92 script for 'virtex405' and see the file
83 arch/powerpc/boot/virtex405-head.S for details. 93 arch/powerpc/boot/virtex405-head.S for details.
94
84 treeImage.%; Image format for used with OpenBIOS firmware found 95 treeImage.%; Image format for used with OpenBIOS firmware found
85 on some ppc4xx hardware. This image embeds a device 96 on some ppc4xx hardware. This image embeds a device
86 tree blob inside the image. 97 tree blob inside the image.
98
87 uImage: Native image format used by U-Boot. The uImage target 99 uImage: Native image format used by U-Boot. The uImage target
88 does not add any boot code. It just wraps a compressed 100 does not add any boot code. It just wraps a compressed
89 vmlinux in the uImage data structure. This image 101 vmlinux in the uImage data structure. This image
@@ -91,12 +103,14 @@ Currently, the following image format targets exist:
91 a device tree to the kernel at boot. If using an older 103 a device tree to the kernel at boot. If using an older
92 version of U-Boot, then you need to use a cuImage 104 version of U-Boot, then you need to use a cuImage
93 instead. 105 instead.
106
94 zImage.%: Image format which does not embed a device tree. 107 zImage.%: Image format which does not embed a device tree.
95 Used by OpenFirmware and other firmware interfaces 108 Used by OpenFirmware and other firmware interfaces
96 which are able to supply a device tree. This image 109 which are able to supply a device tree. This image
97 expects firmware to provide the device tree at boot. 110 expects firmware to provide the device tree at boot.
98 Typically, if you have general purpose PowerPC 111 Typically, if you have general purpose PowerPC
99 hardware then you want this image format. 112 hardware then you want this image format.
113 ==================== ========================================================
100 114
101Image types which embed a device tree blob (simpleImage, dtbImage, treeImage, 115Image types which embed a device tree blob (simpleImage, dtbImage, treeImage,
102and cuImage) all generate the device tree blob from a file in the 116and cuImage) all generate the device tree blob from a file in the
diff --git a/Documentation/powerpc/cpu_families.txt b/Documentation/powerpc/cpu_families.rst
index fc08e22feb1a..1e063c5440c3 100644
--- a/Documentation/powerpc/cpu_families.txt
+++ b/Documentation/powerpc/cpu_families.rst
@@ -1,3 +1,4 @@
1============
1CPU Families 2CPU Families
2============ 3============
3 4
@@ -8,8 +9,8 @@ and are supported by arch/powerpc.
8Book3S (aka sPAPR) 9Book3S (aka sPAPR)
9------------------ 10------------------
10 11
11 - Hash MMU 12- Hash MMU
12 - Mix of 32 & 64 bit 13- Mix of 32 & 64 bit::
13 14
14 +--------------+ +----------------+ 15 +--------------+ +----------------+
15 | Old POWER | --------------> | RS64 (threads) | 16 | Old POWER | --------------> | RS64 (threads) |
@@ -108,8 +109,8 @@ Book3S (aka sPAPR)
108IBM BookE 109IBM BookE
109--------- 110---------
110 111
111 - Software loaded TLB. 112- Software loaded TLB.
112 - All 32 bit 113- All 32 bit::
113 114
114 +--------------+ 115 +--------------+
115 | 401 | 116 | 401 |
@@ -155,8 +156,8 @@ IBM BookE
155Motorola/Freescale 8xx 156Motorola/Freescale 8xx
156---------------------- 157----------------------
157 158
158 - Software loaded with hardware assist. 159- Software loaded with hardware assist.
159 - All 32 bit 160- All 32 bit::
160 161
161 +-------------+ 162 +-------------+
162 | MPC8xx Core | 163 | MPC8xx Core |
@@ -166,9 +167,9 @@ Motorola/Freescale 8xx
166Freescale BookE 167Freescale BookE
167--------------- 168---------------
168 169
169 - Software loaded TLB. 170- Software loaded TLB.
170 - e6500 adds HW loaded indirect TLB entries. 171- e6500 adds HW loaded indirect TLB entries.
171 - Mix of 32 & 64 bit 172- Mix of 32 & 64 bit::
172 173
173 +--------------+ 174 +--------------+
174 | e200 | 175 | e200 |
@@ -207,8 +208,8 @@ Freescale BookE
207IBM A2 core 208IBM A2 core
208----------- 209-----------
209 210
210 - Book3E, software loaded TLB + HW loaded indirect TLB entries. 211- Book3E, software loaded TLB + HW loaded indirect TLB entries.
211 - 64 bit 212- 64 bit::
212 213
213 +--------------+ +----------------+ 214 +--------------+ +----------------+
214 | A2 core | --> | WSP | 215 | A2 core | --> | WSP |
diff --git a/Documentation/powerpc/cpu_features.txt b/Documentation/powerpc/cpu_features.rst
index ae09df8722c8..b7bcdd2f41bb 100644
--- a/Documentation/powerpc/cpu_features.txt
+++ b/Documentation/powerpc/cpu_features.rst
@@ -1,3 +1,7 @@
1============
2CPU Features
3============
4
1Hollis Blanchard <hollis@austin.ibm.com> 5Hollis Blanchard <hollis@austin.ibm.com>
25 Jun 2002 65 Jun 2002
3 7
@@ -32,7 +36,7 @@ anyways).
32After detecting the processor type, the kernel patches out sections of code 36After detecting the processor type, the kernel patches out sections of code
33that shouldn't be used by writing nop's over it. Using cpufeatures requires 37that shouldn't be used by writing nop's over it. Using cpufeatures requires
34just 2 macros (found in arch/powerpc/include/asm/cputable.h), as seen in head.S 38just 2 macros (found in arch/powerpc/include/asm/cputable.h), as seen in head.S
35transfer_to_handler: 39transfer_to_handler::
36 40
37 #ifdef CONFIG_ALTIVEC 41 #ifdef CONFIG_ALTIVEC
38 BEGIN_FTR_SECTION 42 BEGIN_FTR_SECTION
diff --git a/Documentation/powerpc/cxl.txt b/Documentation/powerpc/cxl.rst
index c5e8d5098ed3..920546d81326 100644
--- a/Documentation/powerpc/cxl.txt
+++ b/Documentation/powerpc/cxl.rst
@@ -1,3 +1,4 @@
1====================================
1Coherent Accelerator Interface (CXL) 2Coherent Accelerator Interface (CXL)
2==================================== 3====================================
3 4
@@ -21,6 +22,8 @@ Introduction
21Hardware overview 22Hardware overview
22================= 23=================
23 24
25 ::
26
24 POWER8/9 FPGA 27 POWER8/9 FPGA
25 +----------+ +---------+ 28 +----------+ +---------+
26 | | | | 29 | | | |
@@ -59,14 +62,16 @@ Hardware overview
59 the fault. The context to which this fault is serviced is based on 62 the fault. The context to which this fault is serviced is based on
60 who owns that acceleration function. 63 who owns that acceleration function.
61 64
62 POWER8 <-----> PSL Version 8 is compliant to the CAIA Version 1.0. 65 - POWER8 and PSL Version 8 are compliant to the CAIA Version 1.0.
63 POWER9 <-----> PSL Version 9 is compliant to the CAIA Version 2.0. 66 - POWER9 and PSL Version 9 are compliant to the CAIA Version 2.0.
67
64 This PSL Version 9 provides new features such as: 68 This PSL Version 9 provides new features such as:
69
65 * Interaction with the nest MMU on the P9 chip. 70 * Interaction with the nest MMU on the P9 chip.
66 * Native DMA support. 71 * Native DMA support.
67 * Supports sending ASB_Notify messages for host thread wakeup. 72 * Supports sending ASB_Notify messages for host thread wakeup.
68 * Supports Atomic operations. 73 * Supports Atomic operations.
69 * .... 74 * etc.
70 75
71 Cards with a PSL9 won't work on a POWER8 system and cards with a 76 Cards with a PSL9 won't work on a POWER8 system and cards with a
72 PSL8 won't work on a POWER9 system. 77 PSL8 won't work on a POWER9 system.
@@ -147,7 +152,9 @@ User API
147 master devices. 152 master devices.
148 153
149 A userspace library libcxl is available here: 154 A userspace library libcxl is available here:
155
150 https://github.com/ibm-capi/libcxl 156 https://github.com/ibm-capi/libcxl
157
151 This provides a C interface to this kernel API. 158 This provides a C interface to this kernel API.
152 159
153open 160open
@@ -165,7 +172,8 @@ open
165 When all available contexts are allocated the open call will fail 172 When all available contexts are allocated the open call will fail
166 and return -ENOSPC. 173 and return -ENOSPC.
167 174
168 Note: IRQs need to be allocated for each context, which may limit 175 Note:
176 IRQs need to be allocated for each context, which may limit
169 the number of contexts that can be created, and therefore 177 the number of contexts that can be created, and therefore
170 how many times the device can be opened. The POWER8 CAPP 178 how many times the device can be opened. The POWER8 CAPP
171 supports 2040 IRQs and 3 are used by the kernel, so 2037 are 179 supports 2040 IRQs and 3 are used by the kernel, so 2037 are
@@ -186,7 +194,9 @@ ioctl
186 updated as userspace allocates and frees memory. This ioctl 194 updated as userspace allocates and frees memory. This ioctl
187 returns once the AFU context is started. 195 returns once the AFU context is started.
188 196
189 Takes a pointer to a struct cxl_ioctl_start_work: 197 Takes a pointer to a struct cxl_ioctl_start_work
198
199 ::
190 200
191 struct cxl_ioctl_start_work { 201 struct cxl_ioctl_start_work {
192 __u64 flags; 202 __u64 flags;
@@ -269,7 +279,7 @@ read
269 The buffer passed to read() must be at least 4K bytes. 279 The buffer passed to read() must be at least 4K bytes.
270 280
271 The result of the read will be a buffer of one or more events, 281 The result of the read will be a buffer of one or more events,
272 each event is of type struct cxl_event, of varying size. 282 each event is of type struct cxl_event, of varying size::
273 283
274 struct cxl_event { 284 struct cxl_event {
275 struct cxl_event_header header; 285 struct cxl_event_header header;
@@ -280,7 +290,9 @@ read
280 }; 290 };
281 }; 291 };
282 292
283 The struct cxl_event_header is defined as: 293 The struct cxl_event_header is defined as
294
295 ::
284 296
285 struct cxl_event_header { 297 struct cxl_event_header {
286 __u16 type; 298 __u16 type;
@@ -307,7 +319,9 @@ read
307 For future extensions and padding. 319 For future extensions and padding.
308 320
309 If the event type is CXL_EVENT_AFU_INTERRUPT then the event 321 If the event type is CXL_EVENT_AFU_INTERRUPT then the event
310 structure is defined as: 322 structure is defined as
323
324 ::
311 325
312 struct cxl_event_afu_interrupt { 326 struct cxl_event_afu_interrupt {
313 __u16 flags; 327 __u16 flags;
@@ -326,7 +340,9 @@ read
326 For future extensions and padding. 340 For future extensions and padding.
327 341
328 If the event type is CXL_EVENT_DATA_STORAGE then the event 342 If the event type is CXL_EVENT_DATA_STORAGE then the event
329 structure is defined as: 343 structure is defined as
344
345 ::
330 346
331 struct cxl_event_data_storage { 347 struct cxl_event_data_storage {
332 __u16 flags; 348 __u16 flags;
@@ -356,7 +372,9 @@ read
356 For future extensions 372 For future extensions
357 373
358 If the event type is CXL_EVENT_AFU_ERROR then the event structure 374 If the event type is CXL_EVENT_AFU_ERROR then the event structure
359 is defined as: 375 is defined as
376
377 ::
360 378
361 struct cxl_event_afu_error { 379 struct cxl_event_afu_error {
362 __u16 flags; 380 __u16 flags;
@@ -393,15 +411,15 @@ open
393ioctl 411ioctl
394----- 412-----
395 413
396CXL_IOCTL_DOWNLOAD_IMAGE: 414CXL_IOCTL_DOWNLOAD_IMAGE / CXL_IOCTL_VALIDATE_IMAGE:
397CXL_IOCTL_VALIDATE_IMAGE:
398 Starts and controls flashing a new FPGA image. Partial 415 Starts and controls flashing a new FPGA image. Partial
399 reconfiguration is not supported (yet), so the image must contain 416 reconfiguration is not supported (yet), so the image must contain
400 a copy of the PSL and AFU(s). Since an image can be quite large, 417 a copy of the PSL and AFU(s). Since an image can be quite large,
401 the caller may have to iterate, splitting the image in smaller 418 the caller may have to iterate, splitting the image in smaller
402 chunks. 419 chunks.
403 420
404 Takes a pointer to a struct cxl_adapter_image: 421 Takes a pointer to a struct cxl_adapter_image::
422
405 struct cxl_adapter_image { 423 struct cxl_adapter_image {
406 __u64 flags; 424 __u64 flags;
407 __u64 data; 425 __u64 data;
@@ -442,7 +460,7 @@ Udev rules
442 The following udev rules could be used to create a symlink to the 460 The following udev rules could be used to create a symlink to the
443 most logical chardev to use in any programming mode (afuX.Yd for 461 most logical chardev to use in any programming mode (afuX.Yd for
444 dedicated, afuX.Ys for afu directed), since the API is virtually 462 dedicated, afuX.Ys for afu directed), since the API is virtually
445 identical for each: 463 identical for each::
446 464
447 SUBSYSTEM=="cxl", ATTRS{mode}=="dedicated_process", SYMLINK="cxl/%b" 465 SUBSYSTEM=="cxl", ATTRS{mode}=="dedicated_process", SYMLINK="cxl/%b"
448 SUBSYSTEM=="cxl", ATTRS{mode}=="afu_directed", \ 466 SUBSYSTEM=="cxl", ATTRS{mode}=="afu_directed", \
diff --git a/Documentation/powerpc/cxlflash.txt b/Documentation/powerpc/cxlflash.rst
index a64bdaa0a1cf..cea67931b3b9 100644
--- a/Documentation/powerpc/cxlflash.txt
+++ b/Documentation/powerpc/cxlflash.rst
@@ -1,3 +1,7 @@
1================================
2Coherent Accelerator (CXL) Flash
3================================
4
1Introduction 5Introduction
2============ 6============
3 7
@@ -28,7 +32,7 @@ Introduction
28 responsible for the initialization of the adapter, setting up the 32 responsible for the initialization of the adapter, setting up the
29 special path for user space access, and performing error recovery. It 33 special path for user space access, and performing error recovery. It
30 communicates directly the Flash Accelerator Functional Unit (AFU) 34 communicates directly the Flash Accelerator Functional Unit (AFU)
31 as described in Documentation/powerpc/cxl.txt. 35 as described in Documentation/powerpc/cxl.rst.
32 36
33 The cxlflash driver supports two, mutually exclusive, modes of 37 The cxlflash driver supports two, mutually exclusive, modes of
34 operation at the device (LUN) level: 38 operation at the device (LUN) level:
@@ -58,7 +62,7 @@ Overview
58 62
59 The CXL Flash Adapter Driver establishes a master context with the 63 The CXL Flash Adapter Driver establishes a master context with the
60 AFU. It uses memory mapped I/O (MMIO) for this control and setup. The 64 AFU. It uses memory mapped I/O (MMIO) for this control and setup. The
61 Adapter Problem Space Memory Map looks like this: 65 Adapter Problem Space Memory Map looks like this::
62 66
63 +-------------------------------+ 67 +-------------------------------+
64 | 512 * 64 KB User MMIO | 68 | 512 * 64 KB User MMIO |
@@ -375,7 +379,7 @@ CXL Flash Driver Host IOCTLs
375 Each host adapter instance that is supported by the cxlflash driver 379 Each host adapter instance that is supported by the cxlflash driver
376 has a special character device associated with it to enable a set of 380 has a special character device associated with it to enable a set of
377 host management function. These character devices are hosted in a 381 host management function. These character devices are hosted in a
378 class dedicated for cxlflash and can be accessed via /dev/cxlflash/*. 382 class dedicated for cxlflash and can be accessed via `/dev/cxlflash/*`.
379 383
380 Applications can be written to perform various functions using the 384 Applications can be written to perform various functions using the
381 host ioctl APIs below. 385 host ioctl APIs below.
diff --git a/Documentation/powerpc/DAWR-POWER9.txt b/Documentation/powerpc/dawr-power9.rst
index ecdbb076438c..c96ab6befd9c 100644
--- a/Documentation/powerpc/DAWR-POWER9.txt
+++ b/Documentation/powerpc/dawr-power9.rst
@@ -1,10 +1,11 @@
1=====================
1DAWR issues on POWER9 2DAWR issues on POWER9
2============================ 3=====================
3 4
4On POWER9 the Data Address Watchpoint Register (DAWR) can cause a checkstop 5On POWER9 the Data Address Watchpoint Register (DAWR) can cause a checkstop
5if it points to cache inhibited (CI) memory. Currently Linux has no way to 6if it points to cache inhibited (CI) memory. Currently Linux has no way to
6distinguish CI memory when configuring the DAWR, so (for now) the DAWR is 7distinguish CI memory when configuring the DAWR, so (for now) the DAWR is
7disabled by this commit: 8disabled by this commit::
8 9
9 commit 9654153158d3e0684a1bdb76dbababdb7111d5a0 10 commit 9654153158d3e0684a1bdb76dbababdb7111d5a0
10 Author: Michael Neuling <mikey@neuling.org> 11 Author: Michael Neuling <mikey@neuling.org>
@@ -12,7 +13,7 @@ disabled by this commit:
12 powerpc: Disable DAWR in the base POWER9 CPU features 13 powerpc: Disable DAWR in the base POWER9 CPU features
13 14
14Technical Details: 15Technical Details:
15============================ 16==================
16 17
17DAWR has 6 different ways of being set. 18DAWR has 6 different ways of being set.
181) ptrace 191) ptrace
@@ -37,7 +38,7 @@ DAWR on the migration.
37For xmon, the 'bd' command will return an error on P9. 38For xmon, the 'bd' command will return an error on P9.
38 39
39Consequences for users 40Consequences for users
40============================ 41======================
41 42
42For GDB watchpoints (ie 'watch' command) on POWER9 bare metal, GDB 43For GDB watchpoints (ie 'watch' command) on POWER9 bare metal, GDB
43will accept the command. Unfortunately since there is no hardware 44will accept the command. Unfortunately since there is no hardware
@@ -57,8 +58,8 @@ trapped in GDB. The watchpoint is remembered, so if the guest is
57migrated back to the POWER8 host, it will start working again. 58migrated back to the POWER8 host, it will start working again.
58 59
59Force enabling the DAWR 60Force enabling the DAWR
60============================= 61=======================
61Kernels (since ~v5.2) have an option to force enable the DAWR via: 62Kernels (since ~v5.2) have an option to force enable the DAWR via::
62 63
63 echo Y > /sys/kernel/debug/powerpc/dawr_enable_dangerous 64 echo Y > /sys/kernel/debug/powerpc/dawr_enable_dangerous
64 65
@@ -86,5 +87,7 @@ dawr_enable_dangerous file will fail if the hypervisor doesn't support
86writing the DAWR. 87writing the DAWR.
87 88
88To double check the DAWR is working, run this kernel selftest: 89To double check the DAWR is working, run this kernel selftest:
90
89 tools/testing/selftests/powerpc/ptrace/ptrace-hwbreak.c 91 tools/testing/selftests/powerpc/ptrace/ptrace-hwbreak.c
92
90Any errors/failures/skips mean something is wrong. 93Any errors/failures/skips mean something is wrong.
diff --git a/Documentation/powerpc/dscr.txt b/Documentation/powerpc/dscr.rst
index ece300c64f76..2ab99006014c 100644
--- a/Documentation/powerpc/dscr.txt
+++ b/Documentation/powerpc/dscr.rst
@@ -1,5 +1,6 @@
1 DSCR (Data Stream Control Register) 1===================================
2 ================================================ 2DSCR (Data Stream Control Register)
3===================================
3 4
4DSCR register in powerpc allows user to have some control of prefetch of data 5DSCR register in powerpc allows user to have some control of prefetch of data
5stream in the processor. Please refer to the ISA documents or related manual 6stream in the processor. Please refer to the ISA documents or related manual
@@ -10,14 +11,17 @@ user interface.
10 11
11(A) Data Structures: 12(A) Data Structures:
12 13
13 (1) thread_struct: 14 (1) thread_struct::
15
14 dscr /* Thread DSCR value */ 16 dscr /* Thread DSCR value */
15 dscr_inherit /* Thread has changed default DSCR */ 17 dscr_inherit /* Thread has changed default DSCR */
16 18
17 (2) PACA: 19 (2) PACA::
20
18 dscr_default /* per-CPU DSCR default value */ 21 dscr_default /* per-CPU DSCR default value */
19 22
20 (3) sysfs.c: 23 (3) sysfs.c::
24
21 dscr_default /* System DSCR default value */ 25 dscr_default /* System DSCR default value */
22 26
23(B) Scheduler Changes: 27(B) Scheduler Changes:
@@ -35,8 +39,8 @@ user interface.
35 39
36(C) SYSFS Interface: 40(C) SYSFS Interface:
37 41
38 Global DSCR default: /sys/devices/system/cpu/dscr_default 42 - Global DSCR default: /sys/devices/system/cpu/dscr_default
39 CPU specific DSCR default: /sys/devices/system/cpu/cpuN/dscr 43 - CPU specific DSCR default: /sys/devices/system/cpu/cpuN/dscr
40 44
41 Changing the global DSCR default in the sysfs will change all the CPU 45 Changing the global DSCR default in the sysfs will change all the CPU
42 specific DSCR defaults immediately in their PACA structures. Again if 46 specific DSCR defaults immediately in their PACA structures. Again if
diff --git a/Documentation/powerpc/eeh-pci-error-recovery.txt b/Documentation/powerpc/eeh-pci-error-recovery.rst
index 678189280bb4..438a87ebc095 100644
--- a/Documentation/powerpc/eeh-pci-error-recovery.txt
+++ b/Documentation/powerpc/eeh-pci-error-recovery.rst
@@ -1,10 +1,10 @@
1==========================
2PCI Bus EEH Error Recovery
3==========================
1 4
5Linas Vepstas <linas@austin.ibm.com>
2 6
3 PCI Bus EEH Error Recovery 712 January 2005
4 --------------------------
5 Linas Vepstas
6 <linas@austin.ibm.com>
7 12 January 2005
8 8
9 9
10Overview: 10Overview:
@@ -143,17 +143,17 @@ seen in /proc/ppc64/eeh (subject to change). Normally, almost
143all of these occur during boot, when the PCI bus is scanned, where 143all of these occur during boot, when the PCI bus is scanned, where
144a large number of 0xff reads are part of the bus scan procedure. 144a large number of 0xff reads are part of the bus scan procedure.
145 145
146If a frozen slot is detected, code in 146If a frozen slot is detected, code in
147arch/powerpc/platforms/pseries/eeh.c will print a stack trace to 147arch/powerpc/platforms/pseries/eeh.c will print a stack trace to
148syslog (/var/log/messages). This stack trace has proven to be very 148syslog (/var/log/messages). This stack trace has proven to be very
149useful to device-driver authors for finding out at what point the EEH 149useful to device-driver authors for finding out at what point the EEH
150error was detected, as the error itself usually occurs slightly 150error was detected, as the error itself usually occurs slightly
151beforehand. 151beforehand.
152 152
153Next, it uses the Linux kernel notifier chain/work queue mechanism to 153Next, it uses the Linux kernel notifier chain/work queue mechanism to
154allow any interested parties to find out about the failure. Device 154allow any interested parties to find out about the failure. Device
155drivers, or other parts of the kernel, can use 155drivers, or other parts of the kernel, can use
156eeh_register_notifier(struct notifier_block *) to find out about EEH 156`eeh_register_notifier(struct notifier_block *)` to find out about EEH
157events. The event will include a pointer to the pci device, the 157events. The event will include a pointer to the pci device, the
158device node and some state info. Receivers of the event can "do as 158device node and some state info. Receivers of the event can "do as
159they wish"; the default handler will be described further in this 159they wish"; the default handler will be described further in this
@@ -162,10 +162,13 @@ section.
162To assist in the recovery of the device, eeh.c exports the 162To assist in the recovery of the device, eeh.c exports the
163following functions: 163following functions:
164 164
165rtas_set_slot_reset() -- assert the PCI #RST line for 1/8th of a second 165rtas_set_slot_reset()
166rtas_configure_bridge() -- ask firmware to configure any PCI bridges 166 assert the PCI #RST line for 1/8th of a second
167rtas_configure_bridge()
168 ask firmware to configure any PCI bridges
167 located topologically under the pci slot. 169 located topologically under the pci slot.
168eeh_save_bars() and eeh_restore_bars(): save and restore the PCI 170eeh_save_bars() and eeh_restore_bars():
171 save and restore the PCI
169 config-space info for a device and any devices under it. 172 config-space info for a device and any devices under it.
170 173
171 174
@@ -191,7 +194,7 @@ events get delivered to user-space scripts.
191 194
192Following is an example sequence of events that cause a device driver 195Following is an example sequence of events that cause a device driver
193close function to be called during the first phase of an EEH reset. 196close function to be called during the first phase of an EEH reset.
194The following sequence is an example of the pcnet32 device driver. 197The following sequence is an example of the pcnet32 device driver::
195 198
196 rpa_php_unconfig_pci_adapter (struct slot *) // in rpaphp_pci.c 199 rpa_php_unconfig_pci_adapter (struct slot *) // in rpaphp_pci.c
197 { 200 {
@@ -241,53 +244,54 @@ The following sequence is an example of the pcnet32 device driver.
241 }}}}}} 244 }}}}}}
242 245
243 246
244 in drivers/pci/pci_driver.c, 247in drivers/pci/pci_driver.c,
245 struct device_driver->remove() is just pci_device_remove() 248struct device_driver->remove() is just pci_device_remove()
246 which calls struct pci_driver->remove() which is pcnet32_remove_one() 249which calls struct pci_driver->remove() which is pcnet32_remove_one()
247 which calls unregister_netdev() (in net/core/dev.c) 250which calls unregister_netdev() (in net/core/dev.c)
248 which calls dev_close() (in net/core/dev.c) 251which calls dev_close() (in net/core/dev.c)
249 which calls dev->stop() which is pcnet32_close() 252which calls dev->stop() which is pcnet32_close()
250 which then does the appropriate shutdown. 253which then does the appropriate shutdown.
251 254
252--- 255---
256
253Following is the analogous stack trace for events sent to user-space 257Following is the analogous stack trace for events sent to user-space
254when the pci device is unconfigured. 258when the pci device is unconfigured::
255 259
256rpa_php_unconfig_pci_adapter() { // in rpaphp_pci.c 260 rpa_php_unconfig_pci_adapter() { // in rpaphp_pci.c
257 calls
258 pci_remove_bus_device (struct pci_dev *) { // in /drivers/pci/remove.c
259 calls 261 calls
260 pci_destroy_dev (struct pci_dev *) { 262 pci_remove_bus_device (struct pci_dev *) { // in /drivers/pci/remove.c
261 calls 263 calls
262 device_unregister (&dev->dev) { // in /drivers/base/core.c 264 pci_destroy_dev (struct pci_dev *) {
263 calls 265 calls
264 device_del(struct device * dev) { // in /drivers/base/core.c 266 device_unregister (&dev->dev) { // in /drivers/base/core.c
265 calls 267 calls
266 kobject_del() { //in /libs/kobject.c 268 device_del(struct device * dev) { // in /drivers/base/core.c
267 calls 269 calls
268 kobject_uevent() { // in /libs/kobject.c 270 kobject_del() { //in /libs/kobject.c
269 calls 271 calls
270 kset_uevent() { // in /lib/kobject.c 272 kobject_uevent() { // in /libs/kobject.c
271 calls 273 calls
272 kset->uevent_ops->uevent() // which is really just 274 kset_uevent() { // in /lib/kobject.c
273 a call to
274 dev_uevent() { // in /drivers/base/core.c
275 calls 275 calls
276 dev->bus->uevent() which is really just a call to 276 kset->uevent_ops->uevent() // which is really just
277 pci_uevent () { // in drivers/pci/hotplug.c 277 a call to
278 which prints device name, etc.... 278 dev_uevent() { // in /drivers/base/core.c
279 calls
280 dev->bus->uevent() which is really just a call to
281 pci_uevent () { // in drivers/pci/hotplug.c
282 which prints device name, etc....
283 }
279 } 284 }
280 } 285 then kobject_uevent() sends a netlink uevent to userspace
281 then kobject_uevent() sends a netlink uevent to userspace 286 --> userspace uevent
282 --> userspace uevent 287 (during early boot, nobody listens to netlink events and
283 (during early boot, nobody listens to netlink events and 288 kobject_uevent() executes uevent_helper[], which runs the
284 kobject_uevent() executes uevent_helper[], which runs the 289 event process /sbin/hotplug)
285 event process /sbin/hotplug) 290 }
286 } 291 }
287 } 292 kobject_del() then calls sysfs_remove_dir(), which would
288 kobject_del() then calls sysfs_remove_dir(), which would 293 trigger any user-space daemon that was watching /sysfs,
289 trigger any user-space daemon that was watching /sysfs, 294 and notice the delete event.
290 and notice the delete event.
291 295
292 296
293Pro's and Con's of the Current Design 297Pro's and Con's of the Current Design
@@ -299,12 +303,12 @@ individual device drivers, so that the current design throws a wide net.
299The biggest negative of the design is that it potentially disturbs 303The biggest negative of the design is that it potentially disturbs
300network daemons and file systems that didn't need to be disturbed. 304network daemons and file systems that didn't need to be disturbed.
301 305
302-- A minor complaint is that resetting the network card causes 306- A minor complaint is that resetting the network card causes
303 user-space back-to-back ifdown/ifup burps that potentially disturb 307 user-space back-to-back ifdown/ifup burps that potentially disturb
304 network daemons, that didn't need to even know that the pci 308 network daemons, that didn't need to even know that the pci
305 card was being rebooted. 309 card was being rebooted.
306 310
307-- A more serious concern is that the same reset, for SCSI devices, 311- A more serious concern is that the same reset, for SCSI devices,
308 causes havoc to mounted file systems. Scripts cannot post-facto 312 causes havoc to mounted file systems. Scripts cannot post-facto
309 unmount a file system without flushing pending buffers, but this 313 unmount a file system without flushing pending buffers, but this
310 is impossible, because I/O has already been stopped. Thus, 314 is impossible, because I/O has already been stopped. Thus,
@@ -322,7 +326,7 @@ network daemons and file systems that didn't need to be disturbed.
322 from the block layer. It would be very natural to add an EEH 326 from the block layer. It would be very natural to add an EEH
323 reset into this chain of events. 327 reset into this chain of events.
324 328
325-- If a SCSI error occurs for the root device, all is lost unless 329- If a SCSI error occurs for the root device, all is lost unless
326 the sysadmin had the foresight to run /bin, /sbin, /etc, /var 330 the sysadmin had the foresight to run /bin, /sbin, /etc, /var
327 and so on, out of ramdisk/tmpfs. 331 and so on, out of ramdisk/tmpfs.
328 332
@@ -330,5 +334,3 @@ network daemons and file systems that didn't need to be disturbed.
330Conclusions 334Conclusions
331----------- 335-----------
332There's forward progress ... 336There's forward progress ...
333
334
diff --git a/Documentation/powerpc/firmware-assisted-dump.txt b/Documentation/powerpc/firmware-assisted-dump.rst
index 10e7f4d16c14..9ca12830a48e 100644
--- a/Documentation/powerpc/firmware-assisted-dump.txt
+++ b/Documentation/powerpc/firmware-assisted-dump.rst
@@ -1,7 +1,8 @@
1======================
2Firmware-Assisted Dump
3======================
1 4
2 Firmware-Assisted Dump 5July 2011
3 ------------------------
4 July 2011
5 6
6The goal of firmware-assisted dump is to enable the dump of 7The goal of firmware-assisted dump is to enable the dump of
7a crashed system, and to do so from a fully-reset system, and 8a crashed system, and to do so from a fully-reset system, and
@@ -27,11 +28,11 @@ in production use.
27Comparing with kdump or other strategies, firmware-assisted 28Comparing with kdump or other strategies, firmware-assisted
28dump offers several strong, practical advantages: 29dump offers several strong, practical advantages:
29 30
30-- Unlike kdump, the system has been reset, and loaded 31- Unlike kdump, the system has been reset, and loaded
31 with a fresh copy of the kernel. In particular, 32 with a fresh copy of the kernel. In particular,
32 PCI and I/O devices have been reinitialized and are 33 PCI and I/O devices have been reinitialized and are
33 in a clean, consistent state. 34 in a clean, consistent state.
34-- Once the dump is copied out, the memory that held the dump 35- Once the dump is copied out, the memory that held the dump
35 is immediately available to the running kernel. And therefore, 36 is immediately available to the running kernel. And therefore,
36 unlike kdump, fadump doesn't need a 2nd reboot to get back 37 unlike kdump, fadump doesn't need a 2nd reboot to get back
37 the system to the production configuration. 38 the system to the production configuration.
@@ -40,17 +41,18 @@ The above can only be accomplished by coordination with,
40and assistance from the Power firmware. The procedure is 41and assistance from the Power firmware. The procedure is
41as follows: 42as follows:
42 43
43-- The first kernel registers the sections of memory with the 44- The first kernel registers the sections of memory with the
44 Power firmware for dump preservation during OS initialization. 45 Power firmware for dump preservation during OS initialization.
45 These registered sections of memory are reserved by the first 46 These registered sections of memory are reserved by the first
46 kernel during early boot. 47 kernel during early boot.
47 48
48-- When a system crashes, the Power firmware will save 49- When a system crashes, the Power firmware will save
49 the low memory (boot memory of size larger of 5% of system RAM 50 the low memory (boot memory of size larger of 5% of system RAM
50 or 256MB) of RAM to the previous registered region. It will 51 or 256MB) of RAM to the previous registered region. It will
51 also save system registers, and hardware PTE's. 52 also save system registers, and hardware PTE's.
52 53
53 NOTE: The term 'boot memory' means size of the low memory chunk 54 NOTE:
55 The term 'boot memory' means size of the low memory chunk
54 that is required for a kernel to boot successfully when 56 that is required for a kernel to boot successfully when
55 booted with restricted memory. By default, the boot memory 57 booted with restricted memory. By default, the boot memory
56 size will be the larger of 5% of system RAM or 256MB. 58 size will be the larger of 5% of system RAM or 256MB.
@@ -64,12 +66,12 @@ as follows:
64 as fadump uses a predefined offset to reserve memory 66 as fadump uses a predefined offset to reserve memory
65 for boot memory dump preservation in case of a crash. 67 for boot memory dump preservation in case of a crash.
66 68
67-- After the low memory (boot memory) area has been saved, the 69- After the low memory (boot memory) area has been saved, the
68 firmware will reset PCI and other hardware state. It will 70 firmware will reset PCI and other hardware state. It will
69 *not* clear the RAM. It will then launch the bootloader, as 71 *not* clear the RAM. It will then launch the bootloader, as
70 normal. 72 normal.
71 73
72-- The freshly booted kernel will notice that there is a new 74- The freshly booted kernel will notice that there is a new
73 node (ibm,dump-kernel) in the device tree, indicating that 75 node (ibm,dump-kernel) in the device tree, indicating that
74 there is crash data available from a previous boot. During 76 there is crash data available from a previous boot. During
75 the early boot OS will reserve rest of the memory above 77 the early boot OS will reserve rest of the memory above
@@ -77,17 +79,18 @@ as follows:
77 size. This will make sure that the second kernel will not 79 size. This will make sure that the second kernel will not
78 touch any of the dump memory area. 80 touch any of the dump memory area.
79 81
80-- User-space tools will read /proc/vmcore to obtain the contents 82- User-space tools will read /proc/vmcore to obtain the contents
81 of memory, which holds the previous crashed kernel dump in ELF 83 of memory, which holds the previous crashed kernel dump in ELF
82 format. The userspace tools may copy this info to disk, or 84 format. The userspace tools may copy this info to disk, or
83 network, nas, san, iscsi, etc. as desired. 85 network, nas, san, iscsi, etc. as desired.
84 86
85-- Once the userspace tool is done saving dump, it will echo 87- Once the userspace tool is done saving dump, it will echo
86 '1' to /sys/kernel/fadump_release_mem to release the reserved 88 '1' to /sys/kernel/fadump_release_mem to release the reserved
87 memory back to general use, except the memory required for 89 memory back to general use, except the memory required for
88 next firmware-assisted dump registration. 90 next firmware-assisted dump registration.
89 91
90 e.g. 92 e.g.::
93
91 # echo 1 > /sys/kernel/fadump_release_mem 94 # echo 1 > /sys/kernel/fadump_release_mem
92 95
93Please note that the firmware-assisted dump feature 96Please note that the firmware-assisted dump feature
@@ -95,7 +98,7 @@ is only available on Power6 and above systems with recent
95firmware versions. 98firmware versions.
96 99
97Implementation details: 100Implementation details:
98---------------------- 101-----------------------
99 102
100During boot, a check is made to see if firmware supports 103During boot, a check is made to see if firmware supports
101this feature on that particular machine. If it does, then 104this feature on that particular machine. If it does, then
@@ -121,7 +124,7 @@ Allocator (CMA) for memory reservation if CMA is configured for kernel.
121With CMA reservation this memory will be available for applications to 124With CMA reservation this memory will be available for applications to
122use it, while kernel is prevented from using it. With this fadump will 125use it, while kernel is prevented from using it. With this fadump will
123still be able to capture all of the kernel memory and most of the user 126still be able to capture all of the kernel memory and most of the user
124space memory except the user pages that were present in CMA region. 127space memory except the user pages that were present in CMA region::
125 128
126 o Memory Reservation during first kernel 129 o Memory Reservation during first kernel
127 130
@@ -166,7 +169,7 @@ The tools to examine the dump will be same as the ones
166used for kdump. 169used for kdump.
167 170
168How to enable firmware-assisted dump (fadump): 171How to enable firmware-assisted dump (fadump):
169------------------------------------- 172----------------------------------------------
170 173
1711. Set config option CONFIG_FA_DUMP=y and build kernel. 1741. Set config option CONFIG_FA_DUMP=y and build kernel.
1722. Boot into linux kernel with 'fadump=on' kernel cmdline option. 1752. Boot into linux kernel with 'fadump=on' kernel cmdline option.
@@ -177,19 +180,20 @@ How to enable firmware-assisted dump (fadump):
177 to specify size of the memory to reserve for boot memory dump 180 to specify size of the memory to reserve for boot memory dump
178 preservation. 181 preservation.
179 182
180NOTE: 1. 'fadump_reserve_mem=' parameter has been deprecated. Instead 183NOTE:
181 use 'crashkernel=' to specify size of the memory to reserve 184 1. 'fadump_reserve_mem=' parameter has been deprecated. Instead
182 for boot memory dump preservation. 185 use 'crashkernel=' to specify size of the memory to reserve
183 2. If firmware-assisted dump fails to reserve memory then it 186 for boot memory dump preservation.
184 will fallback to existing kdump mechanism if 'crashkernel=' 187 2. If firmware-assisted dump fails to reserve memory then it
185 option is set at kernel cmdline. 188 will fallback to existing kdump mechanism if 'crashkernel='
186 3. if user wants to capture all of user space memory and ok with 189 option is set at kernel cmdline.
187 reserved memory not available to production system, then 190 3. if user wants to capture all of user space memory and ok with
188 'fadump=nocma' kernel parameter can be used to fallback to 191 reserved memory not available to production system, then
189 old behaviour. 192 'fadump=nocma' kernel parameter can be used to fallback to
193 old behaviour.
190 194
191Sysfs/debugfs files: 195Sysfs/debugfs files:
192------------ 196--------------------
193 197
194Firmware-assisted dump feature uses sysfs file system to hold 198Firmware-assisted dump feature uses sysfs file system to hold
195the control files and debugfs file to display memory reserved region. 199the control files and debugfs file to display memory reserved region.
@@ -197,20 +201,20 @@ the control files and debugfs file to display memory reserved region.
197Here is the list of files under kernel sysfs: 201Here is the list of files under kernel sysfs:
198 202
199 /sys/kernel/fadump_enabled 203 /sys/kernel/fadump_enabled
200
201 This is used to display the fadump status. 204 This is used to display the fadump status.
202 0 = fadump is disabled 205
203 1 = fadump is enabled 206 - 0 = fadump is disabled
207 - 1 = fadump is enabled
204 208
205 This interface can be used by kdump init scripts to identify if 209 This interface can be used by kdump init scripts to identify if
206 fadump is enabled in the kernel and act accordingly. 210 fadump is enabled in the kernel and act accordingly.
207 211
208 /sys/kernel/fadump_registered 212 /sys/kernel/fadump_registered
209
210 This is used to display the fadump registration status as well 213 This is used to display the fadump registration status as well
211 as to control (start/stop) the fadump registration. 214 as to control (start/stop) the fadump registration.
212 0 = fadump is not registered. 215
213 1 = fadump is registered and ready to handle system crash. 216 - 0 = fadump is not registered.
217 - 1 = fadump is registered and ready to handle system crash.
214 218
215 To register fadump echo 1 > /sys/kernel/fadump_registered and 219 To register fadump echo 1 > /sys/kernel/fadump_registered and
216 echo 0 > /sys/kernel/fadump_registered for un-register and stop the 220 echo 0 > /sys/kernel/fadump_registered for un-register and stop the
@@ -219,13 +223,12 @@ Here is the list of files under kernel sysfs:
219 easily integrated with kdump service start/stop. 223 easily integrated with kdump service start/stop.
220 224
221 /sys/kernel/fadump_release_mem 225 /sys/kernel/fadump_release_mem
222
223 This file is available only when fadump is active during 226 This file is available only when fadump is active during
224 second kernel. This is used to release the reserved memory 227 second kernel. This is used to release the reserved memory
225 region that are held for saving crash dump. To release the 228 region that are held for saving crash dump. To release the
226 reserved memory echo 1 to it: 229 reserved memory echo 1 to it::
227 230
228 echo 1 > /sys/kernel/fadump_release_mem 231 echo 1 > /sys/kernel/fadump_release_mem
229 232
230 After echo 1, the content of the /sys/kernel/debug/powerpc/fadump_region 233 After echo 1, the content of the /sys/kernel/debug/powerpc/fadump_region
231 file will change to reflect the new memory reservations. 234 file will change to reflect the new memory reservations.
@@ -238,38 +241,39 @@ Here is the list of files under powerpc debugfs:
238(Assuming debugfs is mounted on /sys/kernel/debug directory.) 241(Assuming debugfs is mounted on /sys/kernel/debug directory.)
239 242
240 /sys/kernel/debug/powerpc/fadump_region 243 /sys/kernel/debug/powerpc/fadump_region
241
242 This file shows the reserved memory regions if fadump is 244 This file shows the reserved memory regions if fadump is
243 enabled otherwise this file is empty. The output format 245 enabled otherwise this file is empty. The output format
244 is: 246 is::
245 <region>: [<start>-<end>] <reserved-size> bytes, Dumped: <dump-size> 247
248 <region>: [<start>-<end>] <reserved-size> bytes, Dumped: <dump-size>
246 249
247 e.g. 250 e.g.
248 Contents when fadump is registered during first kernel 251 Contents when fadump is registered during first kernel::
249 252
250 # cat /sys/kernel/debug/powerpc/fadump_region 253 # cat /sys/kernel/debug/powerpc/fadump_region
251 CPU : [0x0000006ffb0000-0x0000006fff001f] 0x40020 bytes, Dumped: 0x0 254 CPU : [0x0000006ffb0000-0x0000006fff001f] 0x40020 bytes, Dumped: 0x0
252 HPTE: [0x0000006fff0020-0x0000006fff101f] 0x1000 bytes, Dumped: 0x0 255 HPTE: [0x0000006fff0020-0x0000006fff101f] 0x1000 bytes, Dumped: 0x0
253 DUMP: [0x0000006fff1020-0x0000007fff101f] 0x10000000 bytes, Dumped: 0x0 256 DUMP: [0x0000006fff1020-0x0000007fff101f] 0x10000000 bytes, Dumped: 0x0
254 257
255 Contents when fadump is active during second kernel 258 Contents when fadump is active during second kernel::
256 259
257 # cat /sys/kernel/debug/powerpc/fadump_region 260 # cat /sys/kernel/debug/powerpc/fadump_region
258 CPU : [0x0000006ffb0000-0x0000006fff001f] 0x40020 bytes, Dumped: 0x40020 261 CPU : [0x0000006ffb0000-0x0000006fff001f] 0x40020 bytes, Dumped: 0x40020
259 HPTE: [0x0000006fff0020-0x0000006fff101f] 0x1000 bytes, Dumped: 0x1000 262 HPTE: [0x0000006fff0020-0x0000006fff101f] 0x1000 bytes, Dumped: 0x1000
260 DUMP: [0x0000006fff1020-0x0000007fff101f] 0x10000000 bytes, Dumped: 0x10000000 263 DUMP: [0x0000006fff1020-0x0000007fff101f] 0x10000000 bytes, Dumped: 0x10000000
261 : [0x00000010000000-0x0000006ffaffff] 0x5ffb0000 bytes, Dumped: 0x5ffb0000 264 : [0x00000010000000-0x0000006ffaffff] 0x5ffb0000 bytes, Dumped: 0x5ffb0000
262 265
263NOTE: Please refer to Documentation/filesystems/debugfs.txt on 266NOTE:
267 Please refer to Documentation/filesystems/debugfs.txt on
264 how to mount the debugfs filesystem. 268 how to mount the debugfs filesystem.
265 269
266 270
267TODO: 271TODO:
268----- 272-----
269 o Need to come up with the better approach to find out more 273 - Need to come up with the better approach to find out more
270 accurate boot memory size that is required for a kernel to 274 accurate boot memory size that is required for a kernel to
271 boot successfully when booted with restricted memory. 275 boot successfully when booted with restricted memory.
272 o The fadump implementation introduces a fadump crash info structure 276 - The fadump implementation introduces a fadump crash info structure
273 in the scratch area before the ELF core header. The idea of introducing 277 in the scratch area before the ELF core header. The idea of introducing
274 this structure is to pass some important crash info data to the second 278 this structure is to pass some important crash info data to the second
275 kernel which will help second kernel to populate ELF core header with 279 kernel which will help second kernel to populate ELF core header with
@@ -277,7 +281,9 @@ TODO:
277 design implementation does not address a possibility of introducing 281 design implementation does not address a possibility of introducing
278 additional fields (in future) to this structure without affecting 282 additional fields (in future) to this structure without affecting
279 compatibility. Need to come up with the better approach to address this. 283 compatibility. Need to come up with the better approach to address this.
284
280 The possible approaches are: 285 The possible approaches are:
286
281 1. Introduce version field for version tracking, bump up the version 287 1. Introduce version field for version tracking, bump up the version
282 whenever a new field is added to the structure in future. The version 288 whenever a new field is added to the structure in future. The version
283 field can be used to find out what fields are valid for the current 289 field can be used to find out what fields are valid for the current
@@ -285,8 +291,11 @@ TODO:
285 2. Reserve the area of predefined size (say PAGE_SIZE) for this 291 2. Reserve the area of predefined size (say PAGE_SIZE) for this
286 structure and have unused area as reserved (initialized to zero) 292 structure and have unused area as reserved (initialized to zero)
287 for future field additions. 293 for future field additions.
294
288 The advantage of approach 1 over 2 is we don't need to reserve extra space. 295 The advantage of approach 1 over 2 is we don't need to reserve extra space.
289--- 296
290Author: Mahesh Salgaonkar <mahesh@linux.vnet.ibm.com> 297Author: Mahesh Salgaonkar <mahesh@linux.vnet.ibm.com>
298
291This document is based on the original documentation written for phyp 299This document is based on the original documentation written for phyp
300
292assisted dump by Linas Vepstas and Manish Ahuja. 301assisted dump by Linas Vepstas and Manish Ahuja.
diff --git a/Documentation/powerpc/hvcs.txt b/Documentation/powerpc/hvcs.rst
index a730ca5a07f8..6808acde672f 100644
--- a/Documentation/powerpc/hvcs.txt
+++ b/Documentation/powerpc/hvcs.rst
@@ -1,19 +1,22 @@
1=========================================================================== 1===============================================================
2 HVCS 2HVCS IBM "Hypervisor Virtual Console Server" Installation Guide
3 IBM "Hypervisor Virtual Console Server" Installation Guide 3===============================================================
4 for Linux Kernel 2.6.4+
5 Copyright (C) 2004 IBM Corporation
6 4
7=========================================================================== 5for Linux Kernel 2.6.4+
8NOTE:Eight space tabs are the optimum editor setting for reading this file.
9===========================================================================
10 6
11 Author(s) : Ryan S. Arnold <rsa@us.ibm.com> 7Copyright (C) 2004 IBM Corporation
12 Date Created: March, 02, 2004
13 Last Changed: August, 24, 2004
14 8
15--------------------------------------------------------------------------- 9.. ===========================================================================
16Table of contents: 10.. NOTE: Eight space tabs are the optimum editor setting for reading this file.
11.. ===========================================================================
12
13
14Author(s): Ryan S. Arnold <rsa@us.ibm.com>
15
16Date Created: March, 02, 2004
17Last Changed: August, 24, 2004
18
19.. Table of contents:
17 20
18 1. Driver Introduction: 21 1. Driver Introduction:
19 2. System Requirements 22 2. System Requirements
@@ -27,8 +30,8 @@ Table of contents:
27 8. Questions & Answers: 30 8. Questions & Answers:
28 9. Reporting Bugs: 31 9. Reporting Bugs:
29 32
30---------------------------------------------------------------------------
311. Driver Introduction: 331. Driver Introduction:
34=======================
32 35
33This is the device driver for the IBM Hypervisor Virtual Console Server, 36This is the device driver for the IBM Hypervisor Virtual Console Server,
34"hvcs". The IBM hvcs provides a tty driver interface to allow Linux user 37"hvcs". The IBM hvcs provides a tty driver interface to allow Linux user
@@ -38,8 +41,8 @@ ppc64 system. Physical hardware consoles per partition are not practical
38on this hardware so system consoles are accessed by this driver using 41on this hardware so system consoles are accessed by this driver using
39firmware interfaces to virtual terminal devices. 42firmware interfaces to virtual terminal devices.
40 43
41---------------------------------------------------------------------------
422. System Requirements: 442. System Requirements:
45=======================
43 46
44This device driver was written using 2.6.4 Linux kernel APIs and will only 47This device driver was written using 2.6.4 Linux kernel APIs and will only
45build and run on kernels of this version or later. 48build and run on kernels of this version or later.
@@ -52,8 +55,8 @@ Sysfs must be mounted on the system so that the user can determine which
52major and minor numbers are associated with each vty-server. Directions 55major and minor numbers are associated with each vty-server. Directions
53for sysfs mounting are outside the scope of this document. 56for sysfs mounting are outside the scope of this document.
54 57
55---------------------------------------------------------------------------
563. Build Options: 583. Build Options:
59=================
57 60
58The hvcs driver registers itself as a tty driver. The tty layer 61The hvcs driver registers itself as a tty driver. The tty layer
59dynamically allocates a block of major and minor numbers in a quantity 62dynamically allocates a block of major and minor numbers in a quantity
@@ -65,11 +68,11 @@ If the default number of device entries is adequate then this driver can be
65built into the kernel. If not, the default can be over-ridden by inserting 68built into the kernel. If not, the default can be over-ridden by inserting
66the driver as a module with insmod parameters. 69the driver as a module with insmod parameters.
67 70
68---------------------------------------------------------------------------
693.1 Built-in: 713.1 Built-in:
72-------------
70 73
71The following menuconfig example demonstrates selecting to build this 74The following menuconfig example demonstrates selecting to build this
72driver into the kernel. 75driver into the kernel::
73 76
74 Device Drivers ---> 77 Device Drivers --->
75 Character devices ---> 78 Character devices --->
@@ -77,11 +80,11 @@ driver into the kernel.
77 80
78Begin the kernel make process. 81Begin the kernel make process.
79 82
80---------------------------------------------------------------------------
813.2 Module: 833.2 Module:
84-----------
82 85
83The following menuconfig example demonstrates selecting to build this 86The following menuconfig example demonstrates selecting to build this
84driver as a kernel module. 87driver as a kernel module::
85 88
86 Device Drivers ---> 89 Device Drivers --->
87 Character devices ---> 90 Character devices --->
@@ -89,11 +92,11 @@ driver as a kernel module.
89 92
90The make process will build the following kernel modules: 93The make process will build the following kernel modules:
91 94
92 hvcs.ko 95 - hvcs.ko
93 hvcserver.ko 96 - hvcserver.ko
94 97
95To insert the module with the default allocation execute the following 98To insert the module with the default allocation execute the following
96commands in the order they appear: 99commands in the order they appear::
97 100
98 insmod hvcserver.ko 101 insmod hvcserver.ko
99 insmod hvcs.ko 102 insmod hvcs.ko
@@ -103,7 +106,7 @@ be inserted first, otherwise the hvcs module will not find some of the
103symbols it expects. 106symbols it expects.
104 107
105To override the default use an insmod parameter as follows (requesting 4 108To override the default use an insmod parameter as follows (requesting 4
106tty devices as an example): 109tty devices as an example)::
107 110
108 insmod hvcs.ko hvcs_parm_num_devs=4 111 insmod hvcs.ko hvcs_parm_num_devs=4
109 112
@@ -115,31 +118,31 @@ source file before building.
115NOTE: The length of time it takes to insmod the driver seems to be related 118NOTE: The length of time it takes to insmod the driver seems to be related
116to the number of tty interfaces the registering driver requests. 119to the number of tty interfaces the registering driver requests.
117 120
118In order to remove the driver module execute the following command: 121In order to remove the driver module execute the following command::
119 122
120 rmmod hvcs.ko 123 rmmod hvcs.ko
121 124
122The recommended method for installing hvcs as a module is to use depmod to 125The recommended method for installing hvcs as a module is to use depmod to
123build a current modules.dep file in /lib/modules/`uname -r` and then 126build a current modules.dep file in /lib/modules/`uname -r` and then
124execute: 127execute::
125 128
126modprobe hvcs hvcs_parm_num_devs=4 129 modprobe hvcs hvcs_parm_num_devs=4
127 130
128The modules.dep file indicates that hvcserver.ko needs to be inserted 131The modules.dep file indicates that hvcserver.ko needs to be inserted
129before hvcs.ko and modprobe uses this file to smartly insert the modules in 132before hvcs.ko and modprobe uses this file to smartly insert the modules in
130the proper order. 133the proper order.
131 134
132The following modprobe command is used to remove hvcs and hvcserver in the 135The following modprobe command is used to remove hvcs and hvcserver in the
133proper order: 136proper order::
134 137
135modprobe -r hvcs 138 modprobe -r hvcs
136 139
137---------------------------------------------------------------------------
1384. Installation: 1404. Installation:
141================
139 142
140The tty layer creates sysfs entries which contain the major and minor 143The tty layer creates sysfs entries which contain the major and minor
141numbers allocated for the hvcs driver. The following snippet of "tree" 144numbers allocated for the hvcs driver. The following snippet of "tree"
142output of the sysfs directory shows where these numbers are presented: 145output of the sysfs directory shows where these numbers are presented::
143 146
144 sys/ 147 sys/
145 |-- *other sysfs base dirs* 148 |-- *other sysfs base dirs*
@@ -164,7 +167,7 @@ output of the sysfs directory shows where these numbers are presented:
164 |-- *other sysfs base dirs* 167 |-- *other sysfs base dirs*
165 168
166For the above examples the following output is a result of cat'ing the 169For the above examples the following output is a result of cat'ing the
167"dev" entry in the hvcs directory: 170"dev" entry in the hvcs directory::
168 171
169 Pow5:/sys/class/tty/hvcs0/ # cat dev 172 Pow5:/sys/class/tty/hvcs0/ # cat dev
170 254:0 173 254:0
@@ -184,7 +187,7 @@ systems running hvcs will already have the device entries created or udev
184will do it automatically. 187will do it automatically.
185 188
186Given the example output above, to manually create a /dev/hvcs* node entry 189Given the example output above, to manually create a /dev/hvcs* node entry
187mknod can be used as follows: 190mknod can be used as follows::
188 191
189 mknod /dev/hvcs0 c 254 0 192 mknod /dev/hvcs0 c 254 0
190 mknod /dev/hvcs1 c 254 1 193 mknod /dev/hvcs1 c 254 1
@@ -195,15 +198,15 @@ Using mknod to manually create the device entries makes these device nodes
195persistent. Once created they will exist prior to the driver insmod. 198persistent. Once created they will exist prior to the driver insmod.
196 199
197Attempting to connect an application to /dev/hvcs* prior to insertion of 200Attempting to connect an application to /dev/hvcs* prior to insertion of
198the hvcs module will result in an error message similar to the following: 201the hvcs module will result in an error message similar to the following::
199 202
200 "/dev/hvcs*: No such device". 203 "/dev/hvcs*: No such device".
201 204
202NOTE: Just because there is a device node present doesn't mean that there 205NOTE: Just because there is a device node present doesn't mean that there
203is a vty-server device configured for that node. 206is a vty-server device configured for that node.
204 207
205---------------------------------------------------------------------------
2065. Connection 2085. Connection
209=============
207 210
208Since this driver controls devices that provide a tty interface a user can 211Since this driver controls devices that provide a tty interface a user can
209interact with the device node entries using any standard tty-interactive 212interact with the device node entries using any standard tty-interactive
@@ -249,7 +252,7 @@ vty-server adapter is associated with which /dev/hvcs* node a special sysfs
249attribute has been added to each vty-server sysfs entry. This entry is 252attribute has been added to each vty-server sysfs entry. This entry is
250called "index" and showing it reveals an integer that refers to the 253called "index" and showing it reveals an integer that refers to the
251/dev/hvcs* entry to use to connect to that device. For instance cat'ing the 254/dev/hvcs* entry to use to connect to that device. For instance cat'ing the
252index attribute of vty-server adapter 30000004 shows the following. 255index attribute of vty-server adapter 30000004 shows the following::
253 256
254 Pow5:/sys/bus/vio/drivers/hvcs/30000004 # cat index 257 Pow5:/sys/bus/vio/drivers/hvcs/30000004 # cat index
255 2 258 2
@@ -262,8 +265,8 @@ system the /dev/hvcs* entry that interacts with a particular vty-server
262adapter is not guaranteed to remain the same across system reboots. Look 265adapter is not guaranteed to remain the same across system reboots. Look
263in the Q & A section for more on this issue. 266in the Q & A section for more on this issue.
264 267
265---------------------------------------------------------------------------
2666. Disconnection 2686. Disconnection
269================
267 270
268As a security feature to prevent the delivery of stale data to an 271As a security feature to prevent the delivery of stale data to an
269unintended target the Power5 system firmware disables the fetching of data 272unintended target the Power5 system firmware disables the fetching of data
@@ -305,7 +308,7 @@ connection between the vty-server and target vty ONLY if the vterm_state
305previously read '1'. The write directive is ignored if the vterm_state 308previously read '1'. The write directive is ignored if the vterm_state
306read '0' or if any value other than '0' was written to the vterm_state 309read '0' or if any value other than '0' was written to the vterm_state
307attribute. The following example will show the method used for verifying 310attribute. The following example will show the method used for verifying
308the vty-server connection status and disconnecting a vty-server connection. 311the vty-server connection status and disconnecting a vty-server connection::
309 312
310 Pow5:/sys/bus/vio/drivers/hvcs/30000004 # cat vterm_state 313 Pow5:/sys/bus/vio/drivers/hvcs/30000004 # cat vterm_state
311 1 314 1
@@ -318,12 +321,12 @@ the vty-server connection status and disconnecting a vty-server connection.
318All vty-server connections are automatically terminated when the device is 321All vty-server connections are automatically terminated when the device is
319hotplug removed and when the module is removed. 322hotplug removed and when the module is removed.
320 323
321---------------------------------------------------------------------------
3227. Configuration 3247. Configuration
325================
323 326
324Each vty-server has a sysfs entry in the /sys/devices/vio directory, which 327Each vty-server has a sysfs entry in the /sys/devices/vio directory, which
325is symlinked in several other sysfs tree directories, notably under the 328is symlinked in several other sysfs tree directories, notably under the
326hvcs driver entry, which looks like the following example: 329hvcs driver entry, which looks like the following example::
327 330
328 Pow5:/sys/bus/vio/drivers/hvcs # ls 331 Pow5:/sys/bus/vio/drivers/hvcs # ls
329 . .. 30000003 30000004 rescan 332 . .. 30000003 30000004 rescan
@@ -344,7 +347,7 @@ completed or was never executed.
344 347
345Vty-server entries in this directory are a 32 bit partition unique unit 348Vty-server entries in this directory are a 32 bit partition unique unit
346address that is created by firmware. An example vty-server sysfs entry 349address that is created by firmware. An example vty-server sysfs entry
347looks like the following: 350looks like the following::
348 351
349 Pow5:/sys/bus/vio/drivers/hvcs/30000004 # ls 352 Pow5:/sys/bus/vio/drivers/hvcs/30000004 # ls
350 . current_vty devspec name partner_vtys 353 . current_vty devspec name partner_vtys
@@ -352,21 +355,21 @@ looks like the following:
352 355
353Each entry is provided, by default with a "name" attribute. Reading the 356Each entry is provided, by default with a "name" attribute. Reading the
354"name" attribute will reveal the device type as shown in the following 357"name" attribute will reveal the device type as shown in the following
355example: 358example::
356 359
357 Pow5:/sys/bus/vio/drivers/hvcs/30000003 # cat name 360 Pow5:/sys/bus/vio/drivers/hvcs/30000003 # cat name
358 vty-server 361 vty-server
359 362
360Each entry is also provided, by default, with a "devspec" attribute which 363Each entry is also provided, by default, with a "devspec" attribute which
361reveals the full device specification when read, as shown in the following 364reveals the full device specification when read, as shown in the following
362example: 365example::
363 366
364 Pow5:/sys/bus/vio/drivers/hvcs/30000004 # cat devspec 367 Pow5:/sys/bus/vio/drivers/hvcs/30000004 # cat devspec
365 /vdevice/vty-server@30000004 368 /vdevice/vty-server@30000004
366 369
367Each vty-server sysfs dir is provided with two read-only attributes that 370Each vty-server sysfs dir is provided with two read-only attributes that
368provide lists of easily parsed partner vty data: "partner_vtys" and 371provide lists of easily parsed partner vty data: "partner_vtys" and
369"partner_clcs". 372"partner_clcs"::
370 373
371 Pow5:/sys/bus/vio/drivers/hvcs/30000004 # cat partner_vtys 374 Pow5:/sys/bus/vio/drivers/hvcs/30000004 # cat partner_vtys
372 30000000 375 30000000
@@ -396,7 +399,7 @@ A vty-server can only be connected to a single vty at a time. The entry,
396read. 399read.
397 400
398The current_vty can be changed by writing a valid partner clc to the entry 401The current_vty can be changed by writing a valid partner clc to the entry
399as in the following example: 402as in the following example::
400 403
401 Pow5:/sys/bus/vio/drivers/hvcs/30000004 # echo U5112.428.10304 404 Pow5:/sys/bus/vio/drivers/hvcs/30000004 # echo U5112.428.10304
402 8A-V4-C0 > current_vty 405 8A-V4-C0 > current_vty
@@ -408,9 +411,9 @@ currently open connection is freed.
408Information on the "vterm_state" attribute was covered earlier on the 411Information on the "vterm_state" attribute was covered earlier on the
409chapter entitled "disconnection". 412chapter entitled "disconnection".
410 413
411---------------------------------------------------------------------------
4128. Questions & Answers: 4148. Questions & Answers:
413=========================================================================== 415=======================
416
414Q: What are the security concerns involving hvcs? 417Q: What are the security concerns involving hvcs?
415 418
416A: There are three main security concerns: 419A: There are three main security concerns:
@@ -429,6 +432,7 @@ A: There are three main security concerns:
429 partition) will experience the previously logged in session. 432 partition) will experience the previously logged in session.
430 433
431--------------------------------------------------------------------------- 434---------------------------------------------------------------------------
435
432Q: How do I multiplex a console that I grab through hvcs so that other 436Q: How do I multiplex a console that I grab through hvcs so that other
433people can see it: 437people can see it:
434 438
@@ -440,6 +444,7 @@ term type "screen" to others. This means that curses based programs may
440not display properly in screen sessions. 444not display properly in screen sessions.
441 445
442--------------------------------------------------------------------------- 446---------------------------------------------------------------------------
447
443Q: Why are the colors all messed up? 448Q: Why are the colors all messed up?
444Q: Why are the control characters acting strange or not working? 449Q: Why are the control characters acting strange or not working?
445Q: Why is the console output all strange and unintelligible? 450Q: Why is the console output all strange and unintelligible?
@@ -455,6 +460,7 @@ disconnect from the console. This will ensure that the next user gets
455their own TERM type set when they login. 460their own TERM type set when they login.
456 461
457--------------------------------------------------------------------------- 462---------------------------------------------------------------------------
463
458Q: When I try to CONNECT kermit to an hvcs device I get: 464Q: When I try to CONNECT kermit to an hvcs device I get:
459"Sorry, can't open connection: /dev/hvcs*" What is happening? 465"Sorry, can't open connection: /dev/hvcs*" What is happening?
460 466
@@ -490,6 +496,7 @@ A: There is not a corresponding vty-server device that maps to an existing
490/dev/hvcs* entry. 496/dev/hvcs* entry.
491 497
492--------------------------------------------------------------------------- 498---------------------------------------------------------------------------
499
493Q: When I try to CONNECT kermit to an hvcs device I get: 500Q: When I try to CONNECT kermit to an hvcs device I get:
494"Sorry, write access to UUCP lockfile directory denied." 501"Sorry, write access to UUCP lockfile directory denied."
495 502
@@ -497,6 +504,7 @@ A: The /dev/hvcs* entry you have specified doesn't exist where you said it
497does? Maybe you haven't inserted the module (on systems with udev). 504does? Maybe you haven't inserted the module (on systems with udev).
498 505
499--------------------------------------------------------------------------- 506---------------------------------------------------------------------------
507
500Q: If I already have one Linux partition installed can I use hvcs on said 508Q: If I already have one Linux partition installed can I use hvcs on said
501partition to provide the console for the install of a second Linux 509partition to provide the console for the install of a second Linux
502partition? 510partition?
@@ -505,6 +513,7 @@ A: Yes granted that you are connected to the /dev/hvcs* device using
505kermit or cu or some other program that doesn't provide terminal emulation. 513kermit or cu or some other program that doesn't provide terminal emulation.
506 514
507--------------------------------------------------------------------------- 515---------------------------------------------------------------------------
516
508Q: Can I connect to more than one partition's console at a time using this 517Q: Can I connect to more than one partition's console at a time using this
509driver? 518driver?
510 519
@@ -512,6 +521,7 @@ A: Yes. Of course this means that there must be more than one vty-server
512configured for this partition and each must point to a disconnected vty. 521configured for this partition and each must point to a disconnected vty.
513 522
514--------------------------------------------------------------------------- 523---------------------------------------------------------------------------
524
515Q: Does the hvcs driver support dynamic (hotplug) addition of devices? 525Q: Does the hvcs driver support dynamic (hotplug) addition of devices?
516 526
517A: Yes, if you have dlpar and hotplug enabled for your system and it has 527A: Yes, if you have dlpar and hotplug enabled for your system and it has
@@ -519,6 +529,7 @@ been built into the kernel the hvcs drivers is configured to dynamically
519handle additions of new devices and removals of unused devices. 529handle additions of new devices and removals of unused devices.
520 530
521--------------------------------------------------------------------------- 531---------------------------------------------------------------------------
532
522Q: For some reason /dev/hvcs* doesn't map to the same vty-server adapter 533Q: For some reason /dev/hvcs* doesn't map to the same vty-server adapter
523after a reboot. What happened? 534after a reboot. What happened?
524 535
@@ -533,6 +544,7 @@ on how to determine which vty-server goes with which /dev/hvcs* node.
533Hint: look at the sysfs "index" attribute for the vty-server. 544Hint: look at the sysfs "index" attribute for the vty-server.
534 545
535--------------------------------------------------------------------------- 546---------------------------------------------------------------------------
547
536Q: Can I use /dev/hvcs* as a conduit to another partition and use a tty 548Q: Can I use /dev/hvcs* as a conduit to another partition and use a tty
537device on that partition as the other end of the pipe? 549device on that partition as the other end of the pipe?
538 550
@@ -554,7 +566,9 @@ read or write to /dev/hvcs*. Now you have a tty conduit between two
554partitions. 566partitions.
555 567
556--------------------------------------------------------------------------- 568---------------------------------------------------------------------------
569
5579. Reporting Bugs: 5709. Reporting Bugs:
571==================
558 572
559The proper channel for reporting bugs is either through the Linux OS 573The proper channel for reporting bugs is either through the Linux OS
560distribution company that provided your OS or by posting issues to the 574distribution company that provided your OS or by posting issues to the
diff --git a/Documentation/powerpc/index.rst b/Documentation/powerpc/index.rst
new file mode 100644
index 000000000000..549b1cdd77ae
--- /dev/null
+++ b/Documentation/powerpc/index.rst
@@ -0,0 +1,34 @@
1.. SPDX-License-Identifier: GPL-2.0
2
3=======
4powerpc
5=======
6
7.. toctree::
8 :maxdepth: 1
9
10 bootwrapper
11 cpu_families
12 cpu_features
13 cxl
14 cxlflash
15 dawr-power9
16 dscr
17 eeh-pci-error-recovery
18 firmware-assisted-dump
19 hvcs
20 isa-versions
21 mpc52xx
22 pci_iov_resource_on_powernv
23 pmu-ebb
24 ptrace
25 qe_firmware
26 syscall64-abi
27 transactional_memory
28
29.. only:: subproject and html
30
31 Indices
32 =======
33
34 * :ref:`genindex`
diff --git a/Documentation/powerpc/isa-versions.rst b/Documentation/powerpc/isa-versions.rst
index 66c24140ebf1..a363d8c1603c 100644
--- a/Documentation/powerpc/isa-versions.rst
+++ b/Documentation/powerpc/isa-versions.rst
@@ -1,13 +1,12 @@
1:orphan: 1==========================
2
3CPU to ISA Version Mapping 2CPU to ISA Version Mapping
4========================== 3==========================
5 4
6Mapping of some CPU versions to relevant ISA versions. 5Mapping of some CPU versions to relevant ISA versions.
7 6
8========= ==================== 7========= ====================================================================
9CPU Architecture version 8CPU Architecture version
10========= ==================== 9========= ====================================================================
11Power9 Power ISA v3.0B 10Power9 Power ISA v3.0B
12Power8 Power ISA v2.07 11Power8 Power ISA v2.07
13Power7 Power ISA v2.06 12Power7 Power ISA v2.06
@@ -24,7 +23,7 @@ PPC970 - PowerPC User Instruction Set Architecture Book I v2.01
24 - PowerPC Virtual Environment Architecture Book II v2.01 23 - PowerPC Virtual Environment Architecture Book II v2.01
25 - PowerPC Operating Environment Architecture Book III v2.01 24 - PowerPC Operating Environment Architecture Book III v2.01
26 - Plus Altivec/VMX ~= 2.03 25 - Plus Altivec/VMX ~= 2.03
27========= ==================== 26========= ====================================================================
28 27
29 28
30Key Features 29Key Features
@@ -60,9 +59,9 @@ Power5 No
60PPC970 No 59PPC970 No
61========== ==== 60========== ====
62 61
63========== ==================== 62========== ====================================
64CPU Transactional Memory 63CPU Transactional Memory
65========== ==================== 64========== ====================================
66Power9 Yes (* see transactional_memory.txt) 65Power9 Yes (* see transactional_memory.txt)
67Power8 Yes 66Power8 Yes
68Power7 No 67Power7 No
@@ -73,4 +72,4 @@ Power5++ No
73Power5+ No 72Power5+ No
74Power5 No 73Power5 No
75PPC970 No 74PPC970 No
76========== ==================== 75========== ====================================
diff --git a/Documentation/powerpc/mpc52xx.txt b/Documentation/powerpc/mpc52xx.rst
index 0d540a31ea1a..8676ac63e077 100644
--- a/Documentation/powerpc/mpc52xx.txt
+++ b/Documentation/powerpc/mpc52xx.rst
@@ -1,11 +1,13 @@
1=============================
1Linux 2.6.x on MPC52xx family 2Linux 2.6.x on MPC52xx family
2----------------------------- 3=============================
3 4
4For the latest info, go to http://www.246tNt.com/mpc52xx/ 5For the latest info, go to http://www.246tNt.com/mpc52xx/
5 6
6To compile/use : 7To compile/use :
7 8
8 - U-Boot: 9 - U-Boot::
10
9 # <edit Makefile to set ARCH=ppc & CROSS_COMPILE=... ( also EXTRAVERSION 11 # <edit Makefile to set ARCH=ppc & CROSS_COMPILE=... ( also EXTRAVERSION
10 if you wish to ). 12 if you wish to ).
11 # make lite5200_defconfig 13 # make lite5200_defconfig
@@ -16,7 +18,8 @@ To compile/use :
16 => tftpboot 400000 pRamdisk 18 => tftpboot 400000 pRamdisk
17 => bootm 200000 400000 19 => bootm 200000 400000
18 20
19 - DBug: 21 - DBug::
22
20 # <edit Makefile to set ARCH=ppc & CROSS_COMPILE=... ( also EXTRAVERSION 23 # <edit Makefile to set ARCH=ppc & CROSS_COMPILE=... ( also EXTRAVERSION
21 if you wish to ). 24 if you wish to ).
22 # make lite5200_defconfig 25 # make lite5200_defconfig
@@ -28,7 +31,8 @@ To compile/use :
28 DBug> dn -i zImage.initrd.lite5200 31 DBug> dn -i zImage.initrd.lite5200
29 32
30 33
31Some remarks : 34Some remarks:
35
32 - The port is named mpc52xxx, and config options are PPC_MPC52xx. The MGT5100 36 - The port is named mpc52xxx, and config options are PPC_MPC52xx. The MGT5100
33 is not supported, and I'm not sure anyone is interested in working on it 37 is not supported, and I'm not sure anyone is interested in working on it
34 so. I didn't take 5xxx because there's apparently a lot of 5xxx that have 38 so. I didn't take 5xxx because there's apparently a lot of 5xxx that have
diff --git a/Documentation/powerpc/pci_iov_resource_on_powernv.txt b/Documentation/powerpc/pci_iov_resource_on_powernv.rst
index b55c5cd83f8d..f5a5793e1613 100644
--- a/Documentation/powerpc/pci_iov_resource_on_powernv.txt
+++ b/Documentation/powerpc/pci_iov_resource_on_powernv.rst
@@ -1,6 +1,13 @@
1===================================================
2PCI Express I/O Virtualization Resource on PowerNV
3===================================================
4
1Wei Yang <weiyang@linux.vnet.ibm.com> 5Wei Yang <weiyang@linux.vnet.ibm.com>
6
2Benjamin Herrenschmidt <benh@au1.ibm.com> 7Benjamin Herrenschmidt <benh@au1.ibm.com>
8
3Bjorn Helgaas <bhelgaas@google.com> 9Bjorn Helgaas <bhelgaas@google.com>
10
426 Aug 2014 1126 Aug 2014
5 12
6This document describes the requirement from hardware for PCI MMIO resource 13This document describes the requirement from hardware for PCI MMIO resource
@@ -10,6 +17,7 @@ Endpoints and the implementation on P8 (IODA2). The next two sections talks
10about considerations on enabling SRIOV on IODA2. 17about considerations on enabling SRIOV on IODA2.
11 18
121. Introduction to Partitionable Endpoints 191. Introduction to Partitionable Endpoints
20==========================================
13 21
14A Partitionable Endpoint (PE) is a way to group the various resources 22A Partitionable Endpoint (PE) is a way to group the various resources
15associated with a device or a set of devices to provide isolation between 23associated with a device or a set of devices to provide isolation between
@@ -35,6 +43,7 @@ is a completely separate HW entity that replicates the entire logic, so has
35its own set of PEs, etc. 43its own set of PEs, etc.
36 44
372. Implementation of Partitionable Endpoints on P8 (IODA2) 452. Implementation of Partitionable Endpoints on P8 (IODA2)
46==========================================================
38 47
39P8 supports up to 256 Partitionable Endpoints per PHB. 48P8 supports up to 256 Partitionable Endpoints per PHB.
40 49
@@ -149,6 +158,7 @@ P8 supports up to 256 Partitionable Endpoints per PHB.
149 sense, but we haven't done it yet. 158 sense, but we haven't done it yet.
150 159
1513. Considerations for SR-IOV on PowerKVM 1603. Considerations for SR-IOV on PowerKVM
161========================================
152 162
153 * SR-IOV Background 163 * SR-IOV Background
154 164
@@ -224,7 +234,7 @@ P8 supports up to 256 Partitionable Endpoints per PHB.
224 IODA supports 256 PEs, so segmented windows contain 256 segments, so if 234 IODA supports 256 PEs, so segmented windows contain 256 segments, so if
225 total_VFs is less than 256, we have the situation in Figure 1.0, where 235 total_VFs is less than 256, we have the situation in Figure 1.0, where
226 segments [total_VFs, 255] of the M64 window may map to some MMIO range on 236 segments [total_VFs, 255] of the M64 window may map to some MMIO range on
227 other devices: 237 other devices::
228 238
229 0 1 total_VFs - 1 239 0 1 total_VFs - 1
230 +------+------+- -+------+------+ 240 +------+------+- -+------+------+
@@ -243,7 +253,7 @@ P8 supports up to 256 Partitionable Endpoints per PHB.
243 Figure 1.0 Direct map VF(n) BAR space 253 Figure 1.0 Direct map VF(n) BAR space
244 254
245 Our current solution is to allocate 256 segments even if the VF(n) BAR 255 Our current solution is to allocate 256 segments even if the VF(n) BAR
246 space doesn't need that much, as shown in Figure 1.1: 256 space doesn't need that much, as shown in Figure 1.1::
247 257
248 0 1 total_VFs - 1 255 258 0 1 total_VFs - 1 255
249 +------+------+- -+------+------+- -+------+------+ 259 +------+------+- -+------+------+- -+------+------+
@@ -269,6 +279,7 @@ P8 supports up to 256 Partitionable Endpoints per PHB.
269 responds to segments [total_VFs, 255]. 279 responds to segments [total_VFs, 255].
270 280
2714. Implications for the Generic PCI Code 2814. Implications for the Generic PCI Code
282========================================
272 283
273The PCIe SR-IOV spec requires that the base of the VF(n) BAR space be 284The PCIe SR-IOV spec requires that the base of the VF(n) BAR space be
274aligned to the size of an individual VF BAR. 285aligned to the size of an individual VF BAR.
diff --git a/Documentation/powerpc/pmu-ebb.txt b/Documentation/powerpc/pmu-ebb.rst
index 73cd163dbfb8..4f474758eb55 100644
--- a/Documentation/powerpc/pmu-ebb.txt
+++ b/Documentation/powerpc/pmu-ebb.rst
@@ -1,3 +1,4 @@
1========================
1PMU Event Based Branches 2PMU Event Based Branches
2======================== 3========================
3 4
diff --git a/Documentation/powerpc/ptrace.rst b/Documentation/powerpc/ptrace.rst
new file mode 100644
index 000000000000..864d4b6dddd1
--- /dev/null
+++ b/Documentation/powerpc/ptrace.rst
@@ -0,0 +1,156 @@
1======
2Ptrace
3======
4
5GDB intends to support the following hardware debug features of BookE
6processors:
7
84 hardware breakpoints (IAC)
92 hardware watchpoints (read, write and read-write) (DAC)
102 value conditions for the hardware watchpoints (DVC)
11
12For that, we need to extend ptrace so that GDB can query and set these
13resources. Since we're extending, we're trying to create an interface
14that's extendable and that covers both BookE and server processors, so
15that GDB doesn't need to special-case each of them. We added the
16following 3 new ptrace requests.
17
181. PTRACE_PPC_GETHWDEBUGINFO
19============================
20
21Query for GDB to discover the hardware debug features. The main info to
22be returned here is the minimum alignment for the hardware watchpoints.
23BookE processors don't have restrictions here, but server processors have
24an 8-byte alignment restriction for hardware watchpoints. We'd like to avoid
25adding special cases to GDB based on what it sees in AUXV.
26
27Since we're at it, we added other useful info that the kernel can return to
28GDB: this query will return the number of hardware breakpoints, hardware
29watchpoints and whether it supports a range of addresses and a condition.
30The query will fill the following structure provided by the requesting process::
31
32 struct ppc_debug_info {
33 uint32_t version;
34 uint32_t num_instruction_bps;
35 uint32_t num_data_bps;
36 uint32_t num_condition_regs;
37 uint32_t data_bp_alignment;
38 uint32_t sizeof_condition; /* size of the DVC register */
39 uint64_t features; /* bitmask of the individual flags */
40 };
41
42features will have bits indicating whether there is support for::
43
44 #define PPC_DEBUG_FEATURE_INSN_BP_RANGE 0x1
45 #define PPC_DEBUG_FEATURE_INSN_BP_MASK 0x2
46 #define PPC_DEBUG_FEATURE_DATA_BP_RANGE 0x4
47 #define PPC_DEBUG_FEATURE_DATA_BP_MASK 0x8
48 #define PPC_DEBUG_FEATURE_DATA_BP_DAWR 0x10
49
502. PTRACE_SETHWDEBUG
51
52Sets a hardware breakpoint or watchpoint, according to the provided structure::
53
54 struct ppc_hw_breakpoint {
55 uint32_t version;
56 #define PPC_BREAKPOINT_TRIGGER_EXECUTE 0x1
57 #define PPC_BREAKPOINT_TRIGGER_READ 0x2
58 #define PPC_BREAKPOINT_TRIGGER_WRITE 0x4
59 uint32_t trigger_type; /* only some combinations allowed */
60 #define PPC_BREAKPOINT_MODE_EXACT 0x0
61 #define PPC_BREAKPOINT_MODE_RANGE_INCLUSIVE 0x1
62 #define PPC_BREAKPOINT_MODE_RANGE_EXCLUSIVE 0x2
63 #define PPC_BREAKPOINT_MODE_MASK 0x3
64 uint32_t addr_mode; /* address match mode */
65
66 #define PPC_BREAKPOINT_CONDITION_MODE 0x3
67 #define PPC_BREAKPOINT_CONDITION_NONE 0x0
68 #define PPC_BREAKPOINT_CONDITION_AND 0x1
69 #define PPC_BREAKPOINT_CONDITION_EXACT 0x1 /* different name for the same thing as above */
70 #define PPC_BREAKPOINT_CONDITION_OR 0x2
71 #define PPC_BREAKPOINT_CONDITION_AND_OR 0x3
72 #define PPC_BREAKPOINT_CONDITION_BE_ALL 0x00ff0000 /* byte enable bits */
73 #define PPC_BREAKPOINT_CONDITION_BE(n) (1<<((n)+16))
74 uint32_t condition_mode; /* break/watchpoint condition flags */
75
76 uint64_t addr;
77 uint64_t addr2;
78 uint64_t condition_value;
79 };
80
81A request specifies one event, not necessarily just one register to be set.
82For instance, if the request is for a watchpoint with a condition, both the
83DAC and DVC registers will be set in the same request.
84
85With this GDB can ask for all kinds of hardware breakpoints and watchpoints
86that the BookE supports. COMEFROM breakpoints available in server processors
87are not contemplated, but that is out of the scope of this work.
88
89ptrace will return an integer (handle) uniquely identifying the breakpoint or
90watchpoint just created. This integer will be used in the PTRACE_DELHWDEBUG
91request to ask for its removal. Return -ENOSPC if the requested breakpoint
92can't be allocated on the registers.
93
94Some examples of using the structure to:
95
96- set a breakpoint in the first breakpoint register::
97
98 p.version = PPC_DEBUG_CURRENT_VERSION;
99 p.trigger_type = PPC_BREAKPOINT_TRIGGER_EXECUTE;
100 p.addr_mode = PPC_BREAKPOINT_MODE_EXACT;
101 p.condition_mode = PPC_BREAKPOINT_CONDITION_NONE;
102 p.addr = (uint64_t) address;
103 p.addr2 = 0;
104 p.condition_value = 0;
105
106- set a watchpoint which triggers on reads in the second watchpoint register::
107
108 p.version = PPC_DEBUG_CURRENT_VERSION;
109 p.trigger_type = PPC_BREAKPOINT_TRIGGER_READ;
110 p.addr_mode = PPC_BREAKPOINT_MODE_EXACT;
111 p.condition_mode = PPC_BREAKPOINT_CONDITION_NONE;
112 p.addr = (uint64_t) address;
113 p.addr2 = 0;
114 p.condition_value = 0;
115
116- set a watchpoint which triggers only with a specific value::
117
118 p.version = PPC_DEBUG_CURRENT_VERSION;
119 p.trigger_type = PPC_BREAKPOINT_TRIGGER_READ;
120 p.addr_mode = PPC_BREAKPOINT_MODE_EXACT;
121 p.condition_mode = PPC_BREAKPOINT_CONDITION_AND | PPC_BREAKPOINT_CONDITION_BE_ALL;
122 p.addr = (uint64_t) address;
123 p.addr2 = 0;
124 p.condition_value = (uint64_t) condition;
125
126- set a ranged hardware breakpoint::
127
128 p.version = PPC_DEBUG_CURRENT_VERSION;
129 p.trigger_type = PPC_BREAKPOINT_TRIGGER_EXECUTE;
130 p.addr_mode = PPC_BREAKPOINT_MODE_RANGE_INCLUSIVE;
131 p.condition_mode = PPC_BREAKPOINT_CONDITION_NONE;
132 p.addr = (uint64_t) begin_range;
133 p.addr2 = (uint64_t) end_range;
134 p.condition_value = 0;
135
136- set a watchpoint in server processors (BookS)::
137
138 p.version = 1;
139 p.trigger_type = PPC_BREAKPOINT_TRIGGER_RW;
140 p.addr_mode = PPC_BREAKPOINT_MODE_RANGE_INCLUSIVE;
141 or
142 p.addr_mode = PPC_BREAKPOINT_MODE_EXACT;
143
144 p.condition_mode = PPC_BREAKPOINT_CONDITION_NONE;
145 p.addr = (uint64_t) begin_range;
146 /* For PPC_BREAKPOINT_MODE_RANGE_INCLUSIVE addr2 needs to be specified, where
147 * addr2 - addr <= 8 Bytes.
148 */
149 p.addr2 = (uint64_t) end_range;
150 p.condition_value = 0;
151
1523. PTRACE_DELHWDEBUG
153
154Takes an integer which identifies an existing breakpoint or watchpoint
155(i.e., the value returned from PTRACE_SETHWDEBUG), and deletes the
156corresponding breakpoint or watchpoint.
diff --git a/Documentation/powerpc/ptrace.txt b/Documentation/powerpc/ptrace.txt
deleted file mode 100644
index 99c5ce88d0fe..000000000000
--- a/Documentation/powerpc/ptrace.txt
+++ /dev/null
@@ -1,151 +0,0 @@
1GDB intends to support the following hardware debug features of BookE
2processors:
3
44 hardware breakpoints (IAC)
52 hardware watchpoints (read, write and read-write) (DAC)
62 value conditions for the hardware watchpoints (DVC)
7
8For that, we need to extend ptrace so that GDB can query and set these
9resources. Since we're extending, we're trying to create an interface
10that's extendable and that covers both BookE and server processors, so
11that GDB doesn't need to special-case each of them. We added the
12following 3 new ptrace requests.
13
141. PTRACE_PPC_GETHWDEBUGINFO
15
16Query for GDB to discover the hardware debug features. The main info to
17be returned here is the minimum alignment for the hardware watchpoints.
18BookE processors don't have restrictions here, but server processors have
19an 8-byte alignment restriction for hardware watchpoints. We'd like to avoid
20adding special cases to GDB based on what it sees in AUXV.
21
22Since we're at it, we added other useful info that the kernel can return to
23GDB: this query will return the number of hardware breakpoints, hardware
24watchpoints and whether it supports a range of addresses and a condition.
25The query will fill the following structure provided by the requesting process:
26
27struct ppc_debug_info {
28 unit32_t version;
29 unit32_t num_instruction_bps;
30 unit32_t num_data_bps;
31 unit32_t num_condition_regs;
32 unit32_t data_bp_alignment;
33 unit32_t sizeof_condition; /* size of the DVC register */
34 uint64_t features; /* bitmask of the individual flags */
35};
36
37features will have bits indicating whether there is support for:
38
39#define PPC_DEBUG_FEATURE_INSN_BP_RANGE 0x1
40#define PPC_DEBUG_FEATURE_INSN_BP_MASK 0x2
41#define PPC_DEBUG_FEATURE_DATA_BP_RANGE 0x4
42#define PPC_DEBUG_FEATURE_DATA_BP_MASK 0x8
43#define PPC_DEBUG_FEATURE_DATA_BP_DAWR 0x10
44
452. PTRACE_SETHWDEBUG
46
47Sets a hardware breakpoint or watchpoint, according to the provided structure:
48
49struct ppc_hw_breakpoint {
50 uint32_t version;
51#define PPC_BREAKPOINT_TRIGGER_EXECUTE 0x1
52#define PPC_BREAKPOINT_TRIGGER_READ 0x2
53#define PPC_BREAKPOINT_TRIGGER_WRITE 0x4
54 uint32_t trigger_type; /* only some combinations allowed */
55#define PPC_BREAKPOINT_MODE_EXACT 0x0
56#define PPC_BREAKPOINT_MODE_RANGE_INCLUSIVE 0x1
57#define PPC_BREAKPOINT_MODE_RANGE_EXCLUSIVE 0x2
58#define PPC_BREAKPOINT_MODE_MASK 0x3
59 uint32_t addr_mode; /* address match mode */
60
61#define PPC_BREAKPOINT_CONDITION_MODE 0x3
62#define PPC_BREAKPOINT_CONDITION_NONE 0x0
63#define PPC_BREAKPOINT_CONDITION_AND 0x1
64#define PPC_BREAKPOINT_CONDITION_EXACT 0x1 /* different name for the same thing as above */
65#define PPC_BREAKPOINT_CONDITION_OR 0x2
66#define PPC_BREAKPOINT_CONDITION_AND_OR 0x3
67#define PPC_BREAKPOINT_CONDITION_BE_ALL 0x00ff0000 /* byte enable bits */
68#define PPC_BREAKPOINT_CONDITION_BE(n) (1<<((n)+16))
69 uint32_t condition_mode; /* break/watchpoint condition flags */
70
71 uint64_t addr;
72 uint64_t addr2;
73 uint64_t condition_value;
74};
75
76A request specifies one event, not necessarily just one register to be set.
77For instance, if the request is for a watchpoint with a condition, both the
78DAC and DVC registers will be set in the same request.
79
80With this GDB can ask for all kinds of hardware breakpoints and watchpoints
81that the BookE supports. COMEFROM breakpoints available in server processors
82are not contemplated, but that is out of the scope of this work.
83
84ptrace will return an integer (handle) uniquely identifying the breakpoint or
85watchpoint just created. This integer will be used in the PTRACE_DELHWDEBUG
86request to ask for its removal. Return -ENOSPC if the requested breakpoint
87can't be allocated on the registers.
88
89Some examples of using the structure to:
90
91- set a breakpoint in the first breakpoint register
92
93 p.version = PPC_DEBUG_CURRENT_VERSION;
94 p.trigger_type = PPC_BREAKPOINT_TRIGGER_EXECUTE;
95 p.addr_mode = PPC_BREAKPOINT_MODE_EXACT;
96 p.condition_mode = PPC_BREAKPOINT_CONDITION_NONE;
97 p.addr = (uint64_t) address;
98 p.addr2 = 0;
99 p.condition_value = 0;
100
101- set a watchpoint which triggers on reads in the second watchpoint register
102
103 p.version = PPC_DEBUG_CURRENT_VERSION;
104 p.trigger_type = PPC_BREAKPOINT_TRIGGER_READ;
105 p.addr_mode = PPC_BREAKPOINT_MODE_EXACT;
106 p.condition_mode = PPC_BREAKPOINT_CONDITION_NONE;
107 p.addr = (uint64_t) address;
108 p.addr2 = 0;
109 p.condition_value = 0;
110
111- set a watchpoint which triggers only with a specific value
112
113 p.version = PPC_DEBUG_CURRENT_VERSION;
114 p.trigger_type = PPC_BREAKPOINT_TRIGGER_READ;
115 p.addr_mode = PPC_BREAKPOINT_MODE_EXACT;
116 p.condition_mode = PPC_BREAKPOINT_CONDITION_AND | PPC_BREAKPOINT_CONDITION_BE_ALL;
117 p.addr = (uint64_t) address;
118 p.addr2 = 0;
119 p.condition_value = (uint64_t) condition;
120
121- set a ranged hardware breakpoint
122
123 p.version = PPC_DEBUG_CURRENT_VERSION;
124 p.trigger_type = PPC_BREAKPOINT_TRIGGER_EXECUTE;
125 p.addr_mode = PPC_BREAKPOINT_MODE_RANGE_INCLUSIVE;
126 p.condition_mode = PPC_BREAKPOINT_CONDITION_NONE;
127 p.addr = (uint64_t) begin_range;
128 p.addr2 = (uint64_t) end_range;
129 p.condition_value = 0;
130
131- set a watchpoint in server processors (BookS)
132
133 p.version = 1;
134 p.trigger_type = PPC_BREAKPOINT_TRIGGER_RW;
135 p.addr_mode = PPC_BREAKPOINT_MODE_RANGE_INCLUSIVE;
136 or
137 p.addr_mode = PPC_BREAKPOINT_MODE_EXACT;
138
139 p.condition_mode = PPC_BREAKPOINT_CONDITION_NONE;
140 p.addr = (uint64_t) begin_range;
141 /* For PPC_BREAKPOINT_MODE_RANGE_INCLUSIVE addr2 needs to be specified, where
142 * addr2 - addr <= 8 Bytes.
143 */
144 p.addr2 = (uint64_t) end_range;
145 p.condition_value = 0;
146
1473. PTRACE_DELHWDEBUG
148
149Takes an integer which identifies an existing breakpoint or watchpoint
150(i.e., the value returned from PTRACE_SETHWDEBUG), and deletes the
151corresponding breakpoint or watchpoint..
diff --git a/Documentation/powerpc/qe_firmware.txt b/Documentation/powerpc/qe_firmware.rst
index e7ac24aec4ff..42f5103140c9 100644
--- a/Documentation/powerpc/qe_firmware.txt
+++ b/Documentation/powerpc/qe_firmware.rst
@@ -1,23 +1,23 @@
1 Freescale QUICC Engine Firmware Uploading 1=========================================
2 ----------------------------------------- 2Freescale QUICC Engine Firmware Uploading
3=========================================
3 4
4(c) 2007 Timur Tabi <timur at freescale.com>, 5(c) 2007 Timur Tabi <timur at freescale.com>,
5 Freescale Semiconductor 6 Freescale Semiconductor
6 7
7Table of Contents 8.. Table of Contents
8=================
9 9
10 I - Software License for Firmware 10 I - Software License for Firmware
11 11
12 II - Microcode Availability 12 II - Microcode Availability
13 13
14 III - Description and Terminology 14 III - Description and Terminology
15 15
16 IV - Microcode Programming Details 16 IV - Microcode Programming Details
17 17
18 V - Firmware Structure Layout 18 V - Firmware Structure Layout
19 19
20 VI - Sample Code for Creating Firmware Files 20 VI - Sample Code for Creating Firmware Files
21 21
22Revision Information 22Revision Information
23==================== 23====================
@@ -39,7 +39,7 @@ http://opensource.freescale.com. For other firmware files, please contact
39your Freescale representative or your operating system vendor. 39your Freescale representative or your operating system vendor.
40 40
41III - Description and Terminology 41III - Description and Terminology
42================================ 42=================================
43 43
44In this document, the term 'microcode' refers to the sequence of 32-bit 44In this document, the term 'microcode' refers to the sequence of 32-bit
45integers that compose the actual QE microcode. 45integers that compose the actual QE microcode.
@@ -89,7 +89,7 @@ being fixed in the RAM package utilizing they should be activated. This data
89structure signals the microcode which of these virtual traps is active. 89structure signals the microcode which of these virtual traps is active.
90 90
91This structure contains 6 words that the application should copy to some 91This structure contains 6 words that the application should copy to some
92specific been defined. This table describes the structure. 92specific been defined. This table describes the structure::
93 93
94 --------------------------------------------------------------- 94 ---------------------------------------------------------------
95 | Offset in | | Destination Offset | Size of | 95 | Offset in | | Destination Offset | Size of |
@@ -119,7 +119,7 @@ Extended Modes
119This is a double word bit array (64 bits) that defines special functionality 119This is a double word bit array (64 bits) that defines special functionality
120which has an impact on the software drivers. Each bit has its own impact 120which has an impact on the software drivers. Each bit has its own impact
121and has special instructions for the s/w associated with it. This structure is 121and has special instructions for the s/w associated with it. This structure is
122described in this table: 122described in this table::
123 123
124 ----------------------------------------------------------------------- 124 -----------------------------------------------------------------------
125 | Bit # | Name | Description | 125 | Bit # | Name | Description |
@@ -220,7 +220,8 @@ The 'model' field is a 16-bit number that matches the actual SOC. The
220'major' and 'minor' fields are the major and minor revision numbers, 220'major' and 'minor' fields are the major and minor revision numbers,
221respectively, of the SOC. 221respectively, of the SOC.
222 222
223For example, to match the 8323, revision 1.0: 223For example, to match the 8323, revision 1.0::
224
224 soc.model = 8323 225 soc.model = 8323
225 soc.major = 1 226 soc.major = 1
226 soc.minor = 0 227 soc.minor = 0
@@ -273,10 +274,10 @@ library and available to any driver that calles qe_get_firmware_info().
273 'reserved'. 274 'reserved'.
274 275
275After the last microcode is a 32-bit CRC. It can be calculated using 276After the last microcode is a 32-bit CRC. It can be calculated using
276this algorithm: 277this algorithm::
277 278
278u32 crc32(const u8 *p, unsigned int len) 279 u32 crc32(const u8 *p, unsigned int len)
279{ 280 {
280 unsigned int i; 281 unsigned int i;
281 u32 crc = 0; 282 u32 crc = 0;
282 283
@@ -286,7 +287,7 @@ u32 crc32(const u8 *p, unsigned int len)
286 crc = (crc >> 1) ^ ((crc & 1) ? 0xedb88320 : 0); 287 crc = (crc >> 1) ^ ((crc & 1) ? 0xedb88320 : 0);
287 } 288 }
288 return crc; 289 return crc;
289} 290 }
290 291
291VI - Sample Code for Creating Firmware Files 292VI - Sample Code for Creating Firmware Files
292============================================ 293============================================
diff --git a/Documentation/powerpc/syscall64-abi.txt b/Documentation/powerpc/syscall64-abi.rst
index fa716a0d88bd..e49f69f941b9 100644
--- a/Documentation/powerpc/syscall64-abi.txt
+++ b/Documentation/powerpc/syscall64-abi.rst
@@ -5,12 +5,12 @@ Power Architecture 64-bit Linux system call ABI
5syscall 5syscall
6======= 6=======
7 7
8syscall calling sequence[*] matches the Power Architecture 64-bit ELF ABI 8syscall calling sequence\ [1]_ matches the Power Architecture 64-bit ELF ABI
9specification C function calling sequence, including register preservation 9specification C function calling sequence, including register preservation
10rules, with the following differences. 10rules, with the following differences.
11 11
12[*] Some syscalls (typically low-level management functions) may have 12.. [1] Some syscalls (typically low-level management functions) may have
13 different calling sequences (e.g., rt_sigreturn). 13 different calling sequences (e.g., rt_sigreturn).
14 14
15Parameters and return value 15Parameters and return value
16--------------------------- 16---------------------------
@@ -33,12 +33,14 @@ Register preservation rules
33Register preservation rules match the ELF ABI calling sequence with the 33Register preservation rules match the ELF ABI calling sequence with the
34following differences: 34following differences:
35 35
36r0: Volatile. (System call number.) 36=========== ============= ========================================
37r3: Volatile. (Parameter 1, and return value.) 37r0 Volatile (System call number.)
38r4-r8: Volatile. (Parameters 2-6.) 38r3 Volatile (Parameter 1, and return value.)
39cr0: Volatile (cr0.SO is the return error condition) 39r4-r8 Volatile (Parameters 2-6.)
40cr1, cr5-7: Nonvolatile. 40cr0 Volatile (cr0.SO is the return error condition)
41lr: Nonvolatile. 41cr1, cr5-7 Nonvolatile
42lr Nonvolatile
43=========== ============= ========================================
42 44
43All floating point and vector data registers as well as control and status 45All floating point and vector data registers as well as control and status
44registers are nonvolatile. 46registers are nonvolatile.
@@ -90,9 +92,12 @@ The vsyscall may or may not use the caller's stack frame save areas.
90 92
91Register preservation rules 93Register preservation rules
92--------------------------- 94---------------------------
93r0: Volatile. 95
94cr1, cr5-7: Volatile. 96=========== ========
95lr: Volatile. 97r0 Volatile
98cr1, cr5-7 Volatile
99lr Volatile
100=========== ========
96 101
97Invocation 102Invocation
98---------- 103----------
diff --git a/Documentation/powerpc/transactional_memory.txt b/Documentation/powerpc/transactional_memory.rst
index 52c023e14f26..09955103acb4 100644
--- a/Documentation/powerpc/transactional_memory.txt
+++ b/Documentation/powerpc/transactional_memory.rst
@@ -1,3 +1,4 @@
1============================
1Transactional Memory support 2Transactional Memory support
2============================ 3============================
3 4
@@ -17,29 +18,29 @@ instructions are presented to delimit transactions; transactions are
17guaranteed to either complete atomically or roll back and undo any partial 18guaranteed to either complete atomically or roll back and undo any partial
18changes. 19changes.
19 20
20A simple transaction looks like this: 21A simple transaction looks like this::
21 22
22begin_move_money: 23 begin_move_money:
23 tbegin 24 tbegin
24 beq abort_handler 25 beq abort_handler
25 26
26 ld r4, SAVINGS_ACCT(r3) 27 ld r4, SAVINGS_ACCT(r3)
27 ld r5, CURRENT_ACCT(r3) 28 ld r5, CURRENT_ACCT(r3)
28 subi r5, r5, 1 29 subi r5, r5, 1
29 addi r4, r4, 1 30 addi r4, r4, 1
30 std r4, SAVINGS_ACCT(r3) 31 std r4, SAVINGS_ACCT(r3)
31 std r5, CURRENT_ACCT(r3) 32 std r5, CURRENT_ACCT(r3)
32 33
33 tend 34 tend
34 35
35 b continue 36 b continue
36 37
37abort_handler: 38 abort_handler:
38 ... test for odd failures ... 39 ... test for odd failures ...
39 40
40 /* Retry the transaction if it failed because it conflicted with 41 /* Retry the transaction if it failed because it conflicted with
41 * someone else: */ 42 * someone else: */
42 b begin_move_money 43 b begin_move_money
43 44
44 45
45The 'tbegin' instruction denotes the start point, and 'tend' the end point. 46The 'tbegin' instruction denotes the start point, and 'tend' the end point.
@@ -123,7 +124,7 @@ Transaction-aware signal handlers can read the transactional register state
123from the second ucontext. This will be necessary for crash handlers to 124from the second ucontext. This will be necessary for crash handlers to
124determine, for example, the address of the instruction causing the SIGSEGV. 125determine, for example, the address of the instruction causing the SIGSEGV.
125 126
126Example signal handler: 127Example signal handler::
127 128
128 void crash_handler(int sig, siginfo_t *si, void *uc) 129 void crash_handler(int sig, siginfo_t *si, void *uc)
129 { 130 {
@@ -133,9 +134,9 @@ Example signal handler:
133 if (ucp_link) { 134 if (ucp_link) {
134 u64 msr = ucp->uc_mcontext.regs->msr; 135 u64 msr = ucp->uc_mcontext.regs->msr;
135 /* May have transactional ucontext! */ 136 /* May have transactional ucontext! */
136#ifndef __powerpc64__ 137 #ifndef __powerpc64__
137 msr |= ((u64)transactional_ucp->uc_mcontext.regs->msr) << 32; 138 msr |= ((u64)transactional_ucp->uc_mcontext.regs->msr) << 32;
138#endif 139 #endif
139 if (MSR_TM_ACTIVE(msr)) { 140 if (MSR_TM_ACTIVE(msr)) {
140 /* Yes, we crashed during a transaction. Oops. */ 141 /* Yes, we crashed during a transaction. Oops. */
141 fprintf(stderr, "Transaction to be restarted at 0x%llx, but " 142 fprintf(stderr, "Transaction to be restarted at 0x%llx, but "
@@ -176,6 +177,7 @@ Failure cause codes used by kernel
176These are defined in <asm/reg.h>, and distinguish different reasons why the 177These are defined in <asm/reg.h>, and distinguish different reasons why the
177kernel aborted a transaction: 178kernel aborted a transaction:
178 179
180 ====================== ================================
179 TM_CAUSE_RESCHED Thread was rescheduled. 181 TM_CAUSE_RESCHED Thread was rescheduled.
180 TM_CAUSE_TLBI Software TLB invalid. 182 TM_CAUSE_TLBI Software TLB invalid.
181 TM_CAUSE_FAC_UNAV FP/VEC/VSX unavailable trap. 183 TM_CAUSE_FAC_UNAV FP/VEC/VSX unavailable trap.
@@ -184,6 +186,7 @@ kernel aborted a transaction:
184 TM_CAUSE_MISC Currently unused. 186 TM_CAUSE_MISC Currently unused.
185 TM_CAUSE_ALIGNMENT Alignment fault. 187 TM_CAUSE_ALIGNMENT Alignment fault.
186 TM_CAUSE_EMULATE Emulation that touched memory. 188 TM_CAUSE_EMULATE Emulation that touched memory.
189 ====================== ================================
187 190
188These can be checked by the user program's abort handler as TEXASR[0:7]. If 191These can be checked by the user program's abort handler as TEXASR[0:7]. If
189bit 7 is set, it indicates that the error is consider persistent. For example 192bit 7 is set, it indicates that the error is consider persistent. For example
@@ -203,7 +206,7 @@ POWER9
203====== 206======
204 207
205TM on POWER9 has issues with storing the complete register state. This 208TM on POWER9 has issues with storing the complete register state. This
206is described in this commit: 209is described in this commit::
207 210
208 commit 4bb3c7a0208fc13ca70598efd109901a7cd45ae7 211 commit 4bb3c7a0208fc13ca70598efd109901a7cd45ae7
209 Author: Paul Mackerras <paulus@ozlabs.org> 212 Author: Paul Mackerras <paulus@ozlabs.org>
diff --git a/Documentation/process/conf.py b/Documentation/process/conf.py
deleted file mode 100644
index 1b01a80ad9ce..000000000000
--- a/Documentation/process/conf.py
+++ /dev/null
@@ -1,10 +0,0 @@
1# -*- coding: utf-8; mode: python -*-
2
3project = 'Linux Kernel Development Documentation'
4
5tags.add("subproject")
6
7latex_documents = [
8 ('index', 'process.tex', 'Linux Kernel Development Documentation',
9 'The kernel development community', 'manual'),
10]
diff --git a/Documentation/process/deprecated.rst b/Documentation/process/deprecated.rst
index 49e0f64a3427..053b24a6dd38 100644
--- a/Documentation/process/deprecated.rst
+++ b/Documentation/process/deprecated.rst
@@ -119,3 +119,17 @@ array may exceed the remaining memory in the stack segment. This could
119lead to a crash, possible overwriting sensitive contents at the end of the 119lead to a crash, possible overwriting sensitive contents at the end of the
120stack (when built without `CONFIG_THREAD_INFO_IN_TASK=y`), or overwriting 120stack (when built without `CONFIG_THREAD_INFO_IN_TASK=y`), or overwriting
121memory adjacent to the stack (when built without `CONFIG_VMAP_STACK=y`) 121memory adjacent to the stack (when built without `CONFIG_VMAP_STACK=y`)
122
123Implicit switch case fall-through
124---------------------------------
125The C language allows switch cases to "fall through" when
126a "break" statement is missing at the end of a case. This,
127however, introduces ambiguity in the code, as it's not always
128clear if the missing break is intentional or a bug. As there
129have been a long list of flaws `due to missing "break" statements
130<https://cwe.mitre.org/data/definitions/484.html>`_, we no longer allow
131"implicit fall-through". In order to identify an intentional fall-through
132case, we have adopted the marking used by static analyzers: a comment
133saying `/* Fall through */`. Once the C++17 `__attribute__((fallthrough))`
134is more widely handled by C compilers, static analyzers, and IDEs, we can
135switch to using that instead.
diff --git a/Documentation/process/embargoed-hardware-issues.rst b/Documentation/process/embargoed-hardware-issues.rst
new file mode 100644
index 000000000000..d37cbc502936
--- /dev/null
+++ b/Documentation/process/embargoed-hardware-issues.rst
@@ -0,0 +1,279 @@
1Embargoed hardware issues
2=========================
3
4Scope
5-----
6
7Hardware issues which result in security problems are a different category
8of security bugs than pure software bugs which only affect the Linux
9kernel.
10
11Hardware issues like Meltdown, Spectre, L1TF etc. must be treated
12differently because they usually affect all Operating Systems ("OS") and
13therefore need coordination across different OS vendors, distributions,
14hardware vendors and other parties. For some of the issues, software
15mitigations can depend on microcode or firmware updates, which need further
16coordination.
17
18.. _Contact:
19
20Contact
21-------
22
23The Linux kernel hardware security team is separate from the regular Linux
24kernel security team.
25
26The team only handles the coordination of embargoed hardware security
27issues. Reports of pure software security bugs in the Linux kernel are not
28handled by this team and the reporter will be guided to contact the regular
 29Linux kernel security team (:ref:`Documentation/admin-guide/security-bugs.rst
 30<securitybugs>`) instead.
31
32The team can be contacted by email at <hardware-security@kernel.org>. This
33is a private list of security officers who will help you to coordinate an
34issue according to our documented process.
35
36The list is encrypted and email to the list can be sent by either PGP or
37S/MIME encrypted and must be signed with the reporter's PGP key or S/MIME
38certificate. The list's PGP key and S/MIME certificate are available from
39https://www.kernel.org/....
40
41While hardware security issues are often handled by the affected hardware
42vendor, we welcome contact from researchers or individuals who have
43identified a potential hardware flaw.
44
45Hardware security officers
46^^^^^^^^^^^^^^^^^^^^^^^^^^
47
48The current team of hardware security officers:
49
50 - Linus Torvalds (Linux Foundation Fellow)
51 - Greg Kroah-Hartman (Linux Foundation Fellow)
52 - Thomas Gleixner (Linux Foundation Fellow)
53
54Operation of mailing-lists
55^^^^^^^^^^^^^^^^^^^^^^^^^^
56
57The encrypted mailing-lists which are used in our process are hosted on
58Linux Foundation's IT infrastructure. By providing this service Linux
59Foundation's director of IT Infrastructure security technically has the
60ability to access the embargoed information, but is obliged to
61confidentiality by his employment contract. Linux Foundation's director of
62IT Infrastructure security is also responsible for the kernel.org
63infrastructure.
64
65The Linux Foundation's current director of IT Infrastructure security is
66Konstantin Ryabitsev.
67
68
69Non-disclosure agreements
70-------------------------
71
72The Linux kernel hardware security team is not a formal body and therefore
73unable to enter into any non-disclosure agreements. The kernel community
74is aware of the sensitive nature of such issues and offers a Memorandum of
75Understanding instead.
76
77
78Memorandum of Understanding
79---------------------------
80
81The Linux kernel community has a deep understanding of the requirement to
82keep hardware security issues under embargo for coordination between
83different OS vendors, distributors, hardware vendors and other parties.
84
85The Linux kernel community has successfully handled hardware security
86issues in the past and has the necessary mechanisms in place to allow
87community compliant development under embargo restrictions.
88
89The Linux kernel community has a dedicated hardware security team for
90initial contact, which oversees the process of handling such issues under
91embargo rules.
92
93The hardware security team identifies the developers (domain experts) who
94will form the initial response team for a particular issue. The initial
95response team can bring in further developers (domain experts) to address
96the issue in the best technical way.
97
98All involved developers pledge to adhere to the embargo rules and to keep
99the received information confidential. Violation of the pledge will lead to
100immediate exclusion from the current issue and removal from all related
101mailing-lists. In addition, the hardware security team will also exclude
102the offender from future issues. The impact of this consequence is a highly
103effective deterrent in our community. In case a violation happens the
104hardware security team will inform the involved parties immediately. If you
105or anyone becomes aware of a potential violation, please report it
106immediately to the Hardware security officers.
107
108
109Process
110^^^^^^^
111
 112Due to the globally distributed nature of Linux kernel development,
 113face-to-face meetings to address hardware security issues are almost
 114impossible to arrange. Phone conferences are hard to coordinate due to time zones and
115other factors and should be only used when absolutely necessary. Encrypted
116email has been proven to be the most effective and secure communication
117method for these types of issues.
118
119Start of Disclosure
120"""""""""""""""""""
121
122Disclosure starts by contacting the Linux kernel hardware security team by
123email. This initial contact should contain a description of the problem and
124a list of any known affected hardware. If your organization builds or
125distributes the affected hardware, we encourage you to also consider what
126other hardware could be affected.
127
128The hardware security team will provide an incident-specific encrypted
129mailing-list which will be used for initial discussion with the reporter,
130further disclosure and coordination.
131
132The hardware security team will provide the disclosing party a list of
133developers (domain experts) who should be informed initially about the
134issue after confirming with the developers that they will adhere to this
135Memorandum of Understanding and the documented process. These developers
136form the initial response team and will be responsible for handling the
137issue after initial contact. The hardware security team is supporting the
138response team, but is not necessarily involved in the mitigation
139development process.
140
141While individual developers might be covered by a non-disclosure agreement
142via their employer, they cannot enter individual non-disclosure agreements
143in their role as Linux kernel developers. They will, however, agree to
144adhere to this documented process and the Memorandum of Understanding.
145
146
147Disclosure
148""""""""""
149
150The disclosing party provides detailed information to the initial response
151team via the specific encrypted mailing-list.
152
153From our experience the technical documentation of these issues is usually
154a sufficient starting point and further technical clarification is best
155done via email.
156
157Mitigation development
158""""""""""""""""""""""
159
160The initial response team sets up an encrypted mailing-list or repurposes
161an existing one if appropriate. The disclosing party should provide a list
162of contacts for all other parties who have already been, or should be
163informed about the issue. The response team contacts these parties so they
164can name experts who should be subscribed to the mailing-list.
165
166Using a mailing-list is close to the normal Linux development process and
167has been successfully used in developing mitigations for various hardware
168security issues in the past.
169
170The mailing-list operates in the same way as normal Linux development.
171Patches are posted, discussed and reviewed and if agreed on applied to a
172non-public git repository which is only accessible to the participating
173developers via a secure connection. The repository contains the main
174development branch against the mainline kernel and backport branches for
175stable kernel versions as necessary.
176
177The initial response team will identify further experts from the Linux
178kernel developer community as needed and inform the disclosing party about
179their participation. Bringing in experts can happen at any time of the
180development process and often needs to be handled in a timely manner.
181
182Coordinated release
183"""""""""""""""""""
184
185The involved parties will negotiate the date and time where the embargo
186ends. At that point the prepared mitigations are integrated into the
187relevant kernel trees and published.
188
189While we understand that hardware security issues need coordinated embargo
190time, the embargo time should be constrained to the minimum time which is
191required for all involved parties to develop, test and prepare the
192mitigations. Extending embargo time artificially to meet conference talk
193dates or other non-technical reasons is creating more work and burden for
194the involved developers and response teams as the patches need to be kept
195up to date in order to follow the ongoing upstream kernel development,
196which might create conflicting changes.
197
198CVE assignment
199""""""""""""""
200
201Neither the hardware security team nor the initial response team assign
202CVEs, nor are CVEs required for the development process. If CVEs are
203provided by the disclosing party they can be used for documentation
204purposes.
205
206Process ambassadors
207-------------------
208
209For assistance with this process we have established ambassadors in various
210organizations, who can answer questions about or provide guidance on the
211reporting process and further handling. Ambassadors are not involved in the
212disclosure of a particular issue, unless requested by a response team or by
213an involved disclosed party. The current ambassadors list:
214
 215  ============== ========================================================
216 ARM
217 AMD
218 IBM
219 Intel
220 Qualcomm
221
222 Microsoft
223 VMware
224 XEN
225
226 Canonical Tyler Hicks <tyhicks@canonical.com>
227 Debian Ben Hutchings <ben@decadent.org.uk>
228 Oracle Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
229 Red Hat Josh Poimboeuf <jpoimboe@redhat.com>
230 SUSE Jiri Kosina <jkosina@suse.cz>
231
232 Amazon
233 Google
234 ============== ========================================================
235
236If you want your organization to be added to the ambassadors list, please
237contact the hardware security team. The nominated ambassador has to
238understand and support our process fully and is ideally well connected in
239the Linux kernel community.
240
241Encrypted mailing-lists
242-----------------------
243
244We use encrypted mailing-lists for communication. The operating principle
245of these lists is that email sent to the list is encrypted either with the
246list's PGP key or with the list's S/MIME certificate. The mailing-list
247software decrypts the email and re-encrypts it individually for each
248subscriber with the subscriber's PGP key or S/MIME certificate. Details
249about the mailing-list software and the setup which is used to ensure the
250security of the lists and protection of the data can be found here:
251https://www.kernel.org/....
252
253List keys
254^^^^^^^^^
255
256For initial contact see :ref:`Contact`. For incident specific mailing-lists
257the key and S/MIME certificate are conveyed to the subscribers by email
258sent from the specific list.
259
260Subscription to incident specific lists
261^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
262
263Subscription is handled by the response teams. Disclosed parties who want
264to participate in the communication send a list of potential subscribers to
265the response team so the response team can validate subscription requests.
266
267Each subscriber needs to send a subscription request to the response team
268by email. The email must be signed with the subscriber's PGP key or S/MIME
269certificate. If a PGP key is used, it must be available from a public key
270server and is ideally connected to the Linux kernel's PGP web of trust. See
271also: https://www.kernel.org/signature.html.
272
273The response team verifies that the subscriber request is valid and adds
274the subscriber to the list. After subscription the subscriber will receive
275email from the mailing-list which is signed either with the list's PGP key
276or the list's S/MIME certificate. The subscriber's email client can extract
277the PGP key or the S/MIME certificate from the signature so the subscriber
278can send encrypted email to the list.
279
diff --git a/Documentation/process/index.rst b/Documentation/process/index.rst
index 878ebfda7eef..e2c9ffc682c5 100644
--- a/Documentation/process/index.rst
+++ b/Documentation/process/index.rst
@@ -45,6 +45,7 @@ Other guides to the community that are of interest to most developers are:
45 submit-checklist 45 submit-checklist
46 kernel-docs 46 kernel-docs
47 deprecated 47 deprecated
48 embargoed-hardware-issues
48 49
49These are some overall technical guides that have been put here for now for 50These are some overall technical guides that have been put here for now for
50lack of a better place. 51lack of a better place.
diff --git a/Documentation/s390/vfio-ccw.rst b/Documentation/s390/vfio-ccw.rst
index 1e210c6afa88..fca9c4f5bd9c 100644
--- a/Documentation/s390/vfio-ccw.rst
+++ b/Documentation/s390/vfio-ccw.rst
@@ -180,6 +180,13 @@ The process of how these work together.
180 add it to an iommu_group and a vfio_group. Then we could pass through 180 add it to an iommu_group and a vfio_group. Then we could pass through
181 the mdev to a guest. 181 the mdev to a guest.
182 182
183
184VFIO-CCW Regions
185----------------
186
187The vfio-ccw driver exposes MMIO regions to accept requests from and return
188results to userspace.
189
183vfio-ccw I/O region 190vfio-ccw I/O region
184------------------- 191-------------------
185 192
@@ -205,6 +212,25 @@ irb_area stores the I/O result.
205 212
206ret_code stores a return code for each access of the region. 213ret_code stores a return code for each access of the region.
207 214
215This region is always available.
216
217vfio-ccw cmd region
218-------------------
219
220The vfio-ccw cmd region is used to accept asynchronous instructions
221from userspace::
222
223 #define VFIO_CCW_ASYNC_CMD_HSCH (1 << 0)
224 #define VFIO_CCW_ASYNC_CMD_CSCH (1 << 1)
225 struct ccw_cmd_region {
226 __u32 command;
227 __u32 ret_code;
228 } __packed;
229
230This region is exposed via region type VFIO_REGION_SUBTYPE_CCW_ASYNC_CMD.
231
232Currently, CLEAR SUBCHANNEL and HALT SUBCHANNEL use this region.
233
208vfio-ccw operation details 234vfio-ccw operation details
209-------------------------- 235--------------------------
210 236
@@ -306,9 +332,8 @@ Together with the corresponding work in QEMU, we can bring the passed
306through DASD/ECKD device online in a guest now and use it as a block 332through DASD/ECKD device online in a guest now and use it as a block
307device. 333device.
308 334
309While the current code allows the guest to start channel programs via 335The current code allows the guest to start channel programs via
310START SUBCHANNEL, support for HALT SUBCHANNEL or CLEAR SUBCHANNEL is 336START SUBCHANNEL, and to issue HALT SUBCHANNEL and CLEAR SUBCHANNEL.
311not yet implemented.
312 337
313vfio-ccw supports classic (command mode) channel I/O only. Transport 338vfio-ccw supports classic (command mode) channel I/O only. Transport
314mode (HPF) is not supported. 339mode (HPF) is not supported.
diff --git a/Documentation/sh/conf.py b/Documentation/sh/conf.py
deleted file mode 100644
index 1eb684a13ac8..000000000000
--- a/Documentation/sh/conf.py
+++ /dev/null
@@ -1,10 +0,0 @@
1# -*- coding: utf-8; mode: python -*-
2
3project = "SuperH architecture implementation manual"
4
5tags.add("subproject")
6
7latex_documents = [
8 ('index', 'sh.tex', project,
9 'The kernel development community', 'manual'),
10]
diff --git a/Documentation/sound/conf.py b/Documentation/sound/conf.py
deleted file mode 100644
index 3f1fc5e74e7b..000000000000
--- a/Documentation/sound/conf.py
+++ /dev/null
@@ -1,10 +0,0 @@
1# -*- coding: utf-8; mode: python -*-
2
3project = "Linux Sound Subsystem Documentation"
4
5tags.add("subproject")
6
7latex_documents = [
8 ('index', 'sound.tex', project,
9 'The kernel development community', 'manual'),
10]
diff --git a/Documentation/sphinx/load_config.py b/Documentation/sphinx/load_config.py
index 301a21aa4f63..eeb394b39e2c 100644
--- a/Documentation/sphinx/load_config.py
+++ b/Documentation/sphinx/load_config.py
@@ -21,6 +21,29 @@ def loadConfig(namespace):
21 and os.path.normpath(namespace["__file__"]) != os.path.normpath(config_file) ): 21 and os.path.normpath(namespace["__file__"]) != os.path.normpath(config_file) ):
22 config_file = os.path.abspath(config_file) 22 config_file = os.path.abspath(config_file)
23 23
24 # Let's avoid one conf.py file just due to latex_documents
25 start = config_file.find('Documentation/')
26 if start >= 0:
27 start = config_file.find('/', start + 1)
28
29 end = config_file.rfind('/')
30 if start >= 0 and end > 0:
31 dir = config_file[start + 1:end]
32
33 print("source directory: %s" % dir)
34 new_latex_docs = []
35 latex_documents = namespace['latex_documents']
36
37 for l in latex_documents:
38 if l[0].find(dir + '/') == 0:
39 has = True
40 fn = l[0][len(dir) + 1:]
41 new_latex_docs.append((fn, l[1], l[2], l[3], l[4]))
42 break
43
44 namespace['latex_documents'] = new_latex_docs
45
46 # If there is an extra conf.py file, load it
24 if os.path.isfile(config_file): 47 if os.path.isfile(config_file):
25 sys.stdout.write("load additional sphinx-config: %s\n" % config_file) 48 sys.stdout.write("load additional sphinx-config: %s\n" % config_file)
26 config = namespace.copy() 49 config = namespace.copy()
@@ -29,4 +52,6 @@ def loadConfig(namespace):
29 del config['__file__'] 52 del config['__file__']
30 namespace.update(config) 53 namespace.update(config)
31 else: 54 else:
32 sys.stderr.write("WARNING: additional sphinx-config not found: %s\n" % config_file) 55 config = namespace.copy()
56 config['tags'].add("subproject")
57 namespace.update(config)
diff --git a/Documentation/translations/it_IT/doc-guide/sphinx.rst b/Documentation/translations/it_IT/doc-guide/sphinx.rst
index 1739cba8863e..f1ad4504b734 100644
--- a/Documentation/translations/it_IT/doc-guide/sphinx.rst
+++ b/Documentation/translations/it_IT/doc-guide/sphinx.rst
@@ -242,8 +242,9 @@ del kernel:
242* Per inserire blocchi di testo con caratteri a dimensione fissa (codici di 242* Per inserire blocchi di testo con caratteri a dimensione fissa (codici di
243 esempio, casi d'uso, eccetera): utilizzate ``::`` quando non è necessario 243 esempio, casi d'uso, eccetera): utilizzate ``::`` quando non è necessario
244 evidenziare la sintassi, specialmente per piccoli frammenti; invece, 244 evidenziare la sintassi, specialmente per piccoli frammenti; invece,
245 utilizzate ``.. code-block:: <language>`` per blocchi di più lunghi che 245 utilizzate ``.. code-block:: <language>`` per blocchi più lunghi che
246 potranno beneficiare dell'avere la sintassi evidenziata. 246 beneficeranno della sintassi evidenziata. Per un breve pezzo di codice da
247 inserire nel testo, usate \`\`.
247 248
248 249
249Il dominio C 250Il dominio C
@@ -267,12 +268,14 @@ molto comune come ``open`` o ``ioctl``:
267 268
268Il nome della funzione (per esempio ioctl) rimane nel testo ma il nome del suo 269Il nome della funzione (per esempio ioctl) rimane nel testo ma il nome del suo
269riferimento cambia da ``ioctl`` a ``VIDIOC_LOG_STATUS``. Anche la voce 270riferimento cambia da ``ioctl`` a ``VIDIOC_LOG_STATUS``. Anche la voce
270nell'indice cambia in ``VIDIOC_LOG_STATUS`` e si potrà quindi fare riferimento 271nell'indice cambia in ``VIDIOC_LOG_STATUS``.
271a questa funzione scrivendo: 272
272 273Notate che per una funzione non c'è bisogno di usare ``c:func:`` per generarne
273.. code-block:: rst 274i riferimenti nella documentazione. Grazie a qualche magica estensione a
274 275Sphinx, il sistema di generazione della documentazione trasformerà
275 :c:func:`VIDIOC_LOG_STATUS` 276automaticamente un riferimento ad una ``funzione()`` in un riferimento
277incrociato quando questa ha una voce nell'indice. Se trovate degli usi di
278``c:func:`` nella documentazione del kernel, sentitevi liberi di rimuoverli.
276 279
277 280
278Tabelle a liste 281Tabelle a liste
diff --git a/Documentation/translations/it_IT/process/index.rst b/Documentation/translations/it_IT/process/index.rst
index 2eda85d5cd1e..012de0f3154a 100644
--- a/Documentation/translations/it_IT/process/index.rst
+++ b/Documentation/translations/it_IT/process/index.rst
@@ -27,6 +27,7 @@ Di seguito le guide che ogni sviluppatore dovrebbe leggere.
27 code-of-conduct 27 code-of-conduct
28 development-process 28 development-process
29 submitting-patches 29 submitting-patches
30 programming-language
30 coding-style 31 coding-style
31 maintainer-pgp-guide 32 maintainer-pgp-guide
32 email-clients 33 email-clients
diff --git a/Documentation/translations/it_IT/process/kernel-docs.rst b/Documentation/translations/it_IT/process/kernel-docs.rst
index 7bd70d661737..38e0a955121a 100644
--- a/Documentation/translations/it_IT/process/kernel-docs.rst
+++ b/Documentation/translations/it_IT/process/kernel-docs.rst
@@ -1,6 +1,7 @@
1.. include:: ../disclaimer-ita.rst 1.. include:: ../disclaimer-ita.rst
2 2
3:Original: :ref:`Documentation/process/kernel-docs.rst <kernel_docs>` 3:Original: :ref:`Documentation/process/kernel-docs.rst <kernel_docs>`
4:Translator: Federico Vaga <federico.vaga@vaga.pv.it>
4 5
5 6
6.. _it_kernel_docs: 7.. _it_kernel_docs:
@@ -8,6 +9,10 @@
8Indice di documenti per le persone interessate a capire e/o scrivere per il kernel Linux 9Indice di documenti per le persone interessate a capire e/o scrivere per il kernel Linux
9======================================================================================== 10========================================================================================
10 11
11.. warning:: 12.. note::
12 13 Questo documento contiene riferimenti a documenti in lingua inglese; inoltre
13 TODO ancora da tradurre 14 utilizza dai campi *ReStructuredText* di supporto alla ricerca e che per
15 questo motivo è meglio non tradurre al fine di garantirne un corretto
16 utilizzo.
17 Per questi motivi il documento non verrà tradotto. Per favore fate
18 riferimento al documento originale in lingua inglese.
diff --git a/Documentation/translations/it_IT/process/maintainer-pgp-guide.rst b/Documentation/translations/it_IT/process/maintainer-pgp-guide.rst
index 276db0e37f43..118fb4153e8f 100644
--- a/Documentation/translations/it_IT/process/maintainer-pgp-guide.rst
+++ b/Documentation/translations/it_IT/process/maintainer-pgp-guide.rst
@@ -248,7 +248,10 @@ possano ricevere la vostra nuova sottochiave::
248 kernel. 248 kernel.
249 249
250 Se per qualche ragione preferite rimanere con sottochiavi RSA, nel comando 250 Se per qualche ragione preferite rimanere con sottochiavi RSA, nel comando
251 precedente, sostituite "ed25519" con "rsa2048". 251 precedente, sostituite "ed25519" con "rsa2048". In aggiunta, se avete
252 intenzione di usare un dispositivo hardware che non supporta le chiavi
253 ED25519 ECC, come la Nitrokey Pro o la Yubikey, allora dovreste usare
254 "nistp256" al posto di "ed25519".
252 255
253Copia di riserva della chiave primaria per gestire il recupero da disastro 256Copia di riserva della chiave primaria per gestire il recupero da disastro
254-------------------------------------------------------------------------- 257--------------------------------------------------------------------------
@@ -449,23 +452,27 @@ implementi le funzionalità delle smartcard. Sul mercato ci sono diverse
449soluzioni disponibili: 452soluzioni disponibili:
450 453
451- `Nitrokey Start`_: è Open hardware e Free Software, è basata sul progetto 454- `Nitrokey Start`_: è Open hardware e Free Software, è basata sul progetto
452 `GnuK`_ della FSIJ. Ha il supporto per chiavi ECC, ma meno funzionalità di 455 `GnuK`_ della FSIJ. Questo è uno dei pochi dispositivi a supportare le chiavi
453 sicurezza (come la resistenza alla manomissione o alcuni attacchi ad un 456 ECC ED25519, ma offre meno funzionalità di sicurezza (come la resistenza
454 canale laterale). 457 alla manomissione o alcuni attacchi ad un canale laterale).
455- `Nitrokey Pro`_: è simile alla Nitrokey Start, ma è più resistente alla 458- `Nitrokey Pro`_: è simile alla Nitrokey Start, ma è più resistente alla
456 manomissione e offre più funzionalità di sicurezza, ma l'ECC. 459 manomissione e offre più funzionalità di sicurezza. La Pro 2 supporta la
457- `Yubikey 4`_: l'hardware e il software sono proprietari, ma è più economica 460 crittografia ECC (NISTP).
461- `Yubikey 5`_: l'hardware e il software sono proprietari, ma è più economica
458 della Nitrokey Pro ed è venduta anche con porta USB-C il che è utile con i 462 della Nitrokey Pro ed è venduta anche con porta USB-C il che è utile con i
459 computer portatili più recenti. In aggiunta, offre altre funzionalità di 463 computer portatili più recenti. In aggiunta, offre altre funzionalità di
460 sicurezza come FIDO, U2F, ma non l'ECC 464 sicurezza come FIDO, U2F, e ora supporta anche le chiavi ECC (NISTP)
461 465
462`Su LWN c'è una buona recensione`_ dei modelli elencati qui sopra e altri. 466`Su LWN c'è una buona recensione`_ dei modelli elencati qui sopra e altri.
467La scelta dipenderà dal costo, dalla disponibilità nella vostra area
468geografica e vostre considerazioni sull'hardware aperto/proprietario.
469
463Se volete usare chiavi ECC, la vostra migliore scelta sul mercato è la 470Se volete usare chiavi ECC, la vostra migliore scelta sul mercato è la
464Nitrokey Start. 471Nitrokey Start.
465 472
466.. _`Nitrokey Start`: https://shop.nitrokey.com/shop/product/nitrokey-start-6 473.. _`Nitrokey Start`: https://shop.nitrokey.com/shop/product/nitrokey-start-6
467.. _`Nitrokey Pro`: https://shop.nitrokey.com/shop/product/nitrokey-pro-3 474.. _`Nitrokey Pro 2`: https://shop.nitrokey.com/shop/product/nitrokey-pro-2-3
468.. _`Yubikey 4`: https://www.yubico.com/product/yubikey-4-series/ 475.. _`Yubikey 5`: https://www.yubico.com/product/yubikey-5-overview/
469.. _Gnuk: http://www.fsij.org/doc-gnuk/ 476.. _Gnuk: http://www.fsij.org/doc-gnuk/
470.. _`Su LWN c'è una buona recensione`: https://lwn.net/Articles/736231/ 477.. _`Su LWN c'è una buona recensione`: https://lwn.net/Articles/736231/
471 478
diff --git a/Documentation/translations/it_IT/process/programming-language.rst b/Documentation/translations/it_IT/process/programming-language.rst
new file mode 100644
index 000000000000..f4b006395849
--- /dev/null
+++ b/Documentation/translations/it_IT/process/programming-language.rst
@@ -0,0 +1,51 @@
1.. include:: ../disclaimer-ita.rst
2
3:Original: :ref:`Documentation/process/programming-language.rst <programming_language>`
4:Translator: Federico Vaga <federico.vaga@vaga.pv.it>
5
6.. _it_programming_language:
7
8Linguaggio di programmazione
9============================
10
11Il kernel è scritto nel linguaggio di programmazione C [c-language]_.
12Più precisamente, il kernel viene compilato con ``gcc`` [gcc]_ usando
13l'opzione ``-std=gnu89`` [gcc-c-dialect-options]_: il dialetto GNU
14dello standard ISO C90 (con l'aggiunta di alcune funzionalità da C99)
15
16Questo dialetto contiene diverse estensioni al linguaggio [gnu-extensions]_,
17e molte di queste vengono usate sistematicamente dal kernel.
18
19Il kernel offre un certo livello di supporto per la compilazione con ``clang``
20[clang]_ e ``icc`` [icc]_ su diverse architetture, tuttavia in questo momento
21il supporto non è completo e richiede delle patch aggiuntive.
22
23Attributi
24---------
25
26Una delle estensioni più comuni e usate nel kernel sono gli attributi
27[gcc-attribute-syntax]_. Gli attributi permettono di aggiungere una semantica,
28definita dell'implementazione, alle entità del linguaggio (come le variabili,
29le funzioni o i tipi) senza dover fare importanti modifiche sintattiche al
30linguaggio stesso (come l'aggiunta di nuove parole chiave) [n2049]_.
31
32In alcuni casi, gli attributi sono opzionali (ovvero un compilatore che non
33dovesse supportarli dovrebbe produrre comunque codice corretto, anche se
34più lento o che non esegue controlli aggiuntivi durante la compilazione).
35
36Il kernel definisce alcune pseudo parole chiave (per esempio ``__pure``)
37in alternativa alla sintassi GNU per gli attributi (per esempio
38``__attribute__((__pure__))``) allo scopo di mostrare quali funzionalità si
39possono usare e/o per accorciare il codice.
40
41Per maggiori informazioni consultate il file d'intestazione
42``include/linux/compiler_attributes.h``.
43
44.. [c-language] http://www.open-std.org/jtc1/sc22/wg14/www/standards
45.. [gcc] https://gcc.gnu.org
46.. [clang] https://clang.llvm.org
47.. [icc] https://software.intel.com/en-us/c-compilers
48.. [gcc-c-dialect-options] https://gcc.gnu.org/onlinedocs/gcc/C-Dialect-Options.html
49.. [gnu-extensions] https://gcc.gnu.org/onlinedocs/gcc/C-Extensions.html
50.. [gcc-attribute-syntax] https://gcc.gnu.org/onlinedocs/gcc/Attribute-Syntax.html
51.. [n2049] http://www.open-std.org/jtc1/sc22/wg14/www/docs/n2049.pdf
diff --git a/Documentation/translations/ko_KR/memory-barriers.txt b/Documentation/translations/ko_KR/memory-barriers.txt
index a33c2a536542..2774624ee843 100644
--- a/Documentation/translations/ko_KR/memory-barriers.txt
+++ b/Documentation/translations/ko_KR/memory-barriers.txt
@@ -569,7 +569,7 @@ ACQUIRE 는 해당 오í¼ë ˆì´ì…˜ì˜ 로드 부분ì—ë§Œ ì ìš©ë˜ê³  RELEASE ë
569 569
570 [*] 버스 ë§ˆìŠ¤í„°ë§ DMA 와 ì¼ê´€ì„±ì— 대해서는 다ìŒì„ 참고하시기 ë°”ëžë‹ˆë‹¤: 570 [*] 버스 ë§ˆìŠ¤í„°ë§ DMA 와 ì¼ê´€ì„±ì— 대해서는 다ìŒì„ 참고하시기 ë°”ëžë‹ˆë‹¤:
571 571
572 Documentation/PCI/pci.rst 572 Documentation/driver-api/pci/pci.rst
573 Documentation/DMA-API-HOWTO.txt 573 Documentation/DMA-API-HOWTO.txt
574 Documentation/DMA-API.txt 574 Documentation/DMA-API.txt
575 575
diff --git a/Documentation/userspace-api/conf.py b/Documentation/userspace-api/conf.py
deleted file mode 100644
index 2eaf59f844e5..000000000000
--- a/Documentation/userspace-api/conf.py
+++ /dev/null
@@ -1,10 +0,0 @@
1# -*- coding: utf-8; mode: python -*-
2
3project = "The Linux kernel user-space API guide"
4
5tags.add("subproject")
6
7latex_documents = [
8 ('index', 'userspace-api.tex', project,
9 'The kernel development community', 'manual'),
10]
diff --git a/Documentation/virtual/index.rst b/Documentation/virt/index.rst
index 062ffb527043..062ffb527043 100644
--- a/Documentation/virtual/index.rst
+++ b/Documentation/virt/index.rst
diff --git a/Documentation/virtual/kvm/amd-memory-encryption.rst b/Documentation/virt/kvm/amd-memory-encryption.rst
index d18c97b4e140..d18c97b4e140 100644
--- a/Documentation/virtual/kvm/amd-memory-encryption.rst
+++ b/Documentation/virt/kvm/amd-memory-encryption.rst
diff --git a/Documentation/virtual/kvm/api.txt b/Documentation/virt/kvm/api.txt
index e54a3f51ddc5..2d067767b617 100644
--- a/Documentation/virtual/kvm/api.txt
+++ b/Documentation/virt/kvm/api.txt
@@ -3781,7 +3781,7 @@ encrypted VMs.
3781 3781
3782Currently, this ioctl is used for issuing Secure Encrypted Virtualization 3782Currently, this ioctl is used for issuing Secure Encrypted Virtualization
3783(SEV) commands on AMD Processors. The SEV commands are defined in 3783(SEV) commands on AMD Processors. The SEV commands are defined in
3784Documentation/virtual/kvm/amd-memory-encryption.rst. 3784Documentation/virt/kvm/amd-memory-encryption.rst.
3785 3785
37864.111 KVM_MEMORY_ENCRYPT_REG_REGION 37864.111 KVM_MEMORY_ENCRYPT_REG_REGION
3787 3787
diff --git a/Documentation/virtual/kvm/arm/hyp-abi.txt b/Documentation/virt/kvm/arm/hyp-abi.txt
index a20a0bee268d..a20a0bee268d 100644
--- a/Documentation/virtual/kvm/arm/hyp-abi.txt
+++ b/Documentation/virt/kvm/arm/hyp-abi.txt
diff --git a/Documentation/virtual/kvm/arm/psci.txt b/Documentation/virt/kvm/arm/psci.txt
index 559586fc9d37..559586fc9d37 100644
--- a/Documentation/virtual/kvm/arm/psci.txt
+++ b/Documentation/virt/kvm/arm/psci.txt
diff --git a/Documentation/virtual/kvm/cpuid.rst b/Documentation/virt/kvm/cpuid.rst
index 01b081f6e7ea..01b081f6e7ea 100644
--- a/Documentation/virtual/kvm/cpuid.rst
+++ b/Documentation/virt/kvm/cpuid.rst
diff --git a/Documentation/virtual/kvm/devices/README b/Documentation/virt/kvm/devices/README
index 34a69834124a..34a69834124a 100644
--- a/Documentation/virtual/kvm/devices/README
+++ b/Documentation/virt/kvm/devices/README
diff --git a/Documentation/virtual/kvm/devices/arm-vgic-its.txt b/Documentation/virt/kvm/devices/arm-vgic-its.txt
index eeaa95b893a8..eeaa95b893a8 100644
--- a/Documentation/virtual/kvm/devices/arm-vgic-its.txt
+++ b/Documentation/virt/kvm/devices/arm-vgic-its.txt
diff --git a/Documentation/virtual/kvm/devices/arm-vgic-v3.txt b/Documentation/virt/kvm/devices/arm-vgic-v3.txt
index ff290b43c8e5..ff290b43c8e5 100644
--- a/Documentation/virtual/kvm/devices/arm-vgic-v3.txt
+++ b/Documentation/virt/kvm/devices/arm-vgic-v3.txt
diff --git a/Documentation/virtual/kvm/devices/arm-vgic.txt b/Documentation/virt/kvm/devices/arm-vgic.txt
index 97b6518148f8..97b6518148f8 100644
--- a/Documentation/virtual/kvm/devices/arm-vgic.txt
+++ b/Documentation/virt/kvm/devices/arm-vgic.txt
diff --git a/Documentation/virtual/kvm/devices/mpic.txt b/Documentation/virt/kvm/devices/mpic.txt
index 8257397adc3c..8257397adc3c 100644
--- a/Documentation/virtual/kvm/devices/mpic.txt
+++ b/Documentation/virt/kvm/devices/mpic.txt
diff --git a/Documentation/virtual/kvm/devices/s390_flic.txt b/Documentation/virt/kvm/devices/s390_flic.txt
index a4e20a090174..a4e20a090174 100644
--- a/Documentation/virtual/kvm/devices/s390_flic.txt
+++ b/Documentation/virt/kvm/devices/s390_flic.txt
diff --git a/Documentation/virtual/kvm/devices/vcpu.txt b/Documentation/virt/kvm/devices/vcpu.txt
index 2b5dab16c4f2..2b5dab16c4f2 100644
--- a/Documentation/virtual/kvm/devices/vcpu.txt
+++ b/Documentation/virt/kvm/devices/vcpu.txt
diff --git a/Documentation/virtual/kvm/devices/vfio.txt b/Documentation/virt/kvm/devices/vfio.txt
index 528c77c8022c..528c77c8022c 100644
--- a/Documentation/virtual/kvm/devices/vfio.txt
+++ b/Documentation/virt/kvm/devices/vfio.txt
diff --git a/Documentation/virtual/kvm/devices/vm.txt b/Documentation/virt/kvm/devices/vm.txt
index 4ffb82b02468..4ffb82b02468 100644
--- a/Documentation/virtual/kvm/devices/vm.txt
+++ b/Documentation/virt/kvm/devices/vm.txt
diff --git a/Documentation/virtual/kvm/devices/xics.txt b/Documentation/virt/kvm/devices/xics.txt
index 42864935ac5d..42864935ac5d 100644
--- a/Documentation/virtual/kvm/devices/xics.txt
+++ b/Documentation/virt/kvm/devices/xics.txt
diff --git a/Documentation/virtual/kvm/devices/xive.txt b/Documentation/virt/kvm/devices/xive.txt
index 9a24a4525253..9a24a4525253 100644
--- a/Documentation/virtual/kvm/devices/xive.txt
+++ b/Documentation/virt/kvm/devices/xive.txt
diff --git a/Documentation/virtual/kvm/halt-polling.txt b/Documentation/virt/kvm/halt-polling.txt
index 4f791b128dd2..4f791b128dd2 100644
--- a/Documentation/virtual/kvm/halt-polling.txt
+++ b/Documentation/virt/kvm/halt-polling.txt
diff --git a/Documentation/virtual/kvm/hypercalls.txt b/Documentation/virt/kvm/hypercalls.txt
index da210651f714..5f6d291bd004 100644
--- a/Documentation/virtual/kvm/hypercalls.txt
+++ b/Documentation/virt/kvm/hypercalls.txt
@@ -18,7 +18,7 @@ S390:
18 number in R1. 18 number in R1.
19 19
20 For further information on the S390 diagnose call as supported by KVM, 20 For further information on the S390 diagnose call as supported by KVM,
21 refer to Documentation/virtual/kvm/s390-diag.txt. 21 refer to Documentation/virt/kvm/s390-diag.txt.
22 22
23 PowerPC: 23 PowerPC:
24 It uses R3-R10 and hypercall number in R11. R4-R11 are used as output registers. 24 It uses R3-R10 and hypercall number in R11. R4-R11 are used as output registers.
@@ -26,7 +26,7 @@ S390:
26 26
27 KVM hypercalls uses 4 byte opcode, that are patched with 'hypercall-instructions' 27 KVM hypercalls uses 4 byte opcode, that are patched with 'hypercall-instructions'
28 property inside the device tree's /hypervisor node. 28 property inside the device tree's /hypervisor node.
29 For more information refer to Documentation/virtual/kvm/ppc-pv.txt 29 For more information refer to Documentation/virt/kvm/ppc-pv.txt
30 30
31MIPS: 31MIPS:
32 KVM hypercalls use the HYPCALL instruction with code 0 and the hypercall 32 KVM hypercalls use the HYPCALL instruction with code 0 and the hypercall
diff --git a/Documentation/virtual/kvm/index.rst b/Documentation/virt/kvm/index.rst
index 0b206a06f5be..ada224a511fe 100644
--- a/Documentation/virtual/kvm/index.rst
+++ b/Documentation/virt/kvm/index.rst
@@ -9,3 +9,4 @@ KVM
9 9
10 amd-memory-encryption 10 amd-memory-encryption
11 cpuid 11 cpuid
12 vcpu-requests
diff --git a/Documentation/virtual/kvm/locking.txt b/Documentation/virt/kvm/locking.txt
index 635cd6eaf714..635cd6eaf714 100644
--- a/Documentation/virtual/kvm/locking.txt
+++ b/Documentation/virt/kvm/locking.txt
diff --git a/Documentation/virtual/kvm/mmu.txt b/Documentation/virt/kvm/mmu.txt
index 2efe0efc516e..1b9880dfba0a 100644
--- a/Documentation/virtual/kvm/mmu.txt
+++ b/Documentation/virt/kvm/mmu.txt
@@ -298,7 +298,7 @@ Handling a page fault is performed as follows:
298 vcpu->arch.mmio_gfn, and call the emulator 298 vcpu->arch.mmio_gfn, and call the emulator
299 - If both P bit and R/W bit of error code are set, this could possibly 299 - If both P bit and R/W bit of error code are set, this could possibly
300 be handled as a "fast page fault" (fixed without taking the MMU lock). See 300 be handled as a "fast page fault" (fixed without taking the MMU lock). See
301 the description in Documentation/virtual/kvm/locking.txt. 301 the description in Documentation/virt/kvm/locking.txt.
302 - if needed, walk the guest page tables to determine the guest translation 302 - if needed, walk the guest page tables to determine the guest translation
303 (gva->gpa or ngpa->gpa) 303 (gva->gpa or ngpa->gpa)
304 - if permissions are insufficient, reflect the fault back to the guest 304 - if permissions are insufficient, reflect the fault back to the guest
diff --git a/Documentation/virtual/kvm/msr.txt b/Documentation/virt/kvm/msr.txt
index df1f4338b3ca..df1f4338b3ca 100644
--- a/Documentation/virtual/kvm/msr.txt
+++ b/Documentation/virt/kvm/msr.txt
diff --git a/Documentation/virtual/kvm/nested-vmx.txt b/Documentation/virt/kvm/nested-vmx.txt
index 97eb1353e962..97eb1353e962 100644
--- a/Documentation/virtual/kvm/nested-vmx.txt
+++ b/Documentation/virt/kvm/nested-vmx.txt
diff --git a/Documentation/virtual/kvm/ppc-pv.txt b/Documentation/virt/kvm/ppc-pv.txt
index e26115ce4258..e26115ce4258 100644
--- a/Documentation/virtual/kvm/ppc-pv.txt
+++ b/Documentation/virt/kvm/ppc-pv.txt
diff --git a/Documentation/virtual/kvm/review-checklist.txt b/Documentation/virt/kvm/review-checklist.txt
index a83b27635fdd..499af499e296 100644
--- a/Documentation/virtual/kvm/review-checklist.txt
+++ b/Documentation/virt/kvm/review-checklist.txt
@@ -7,7 +7,7 @@ Review checklist for kvm patches
72. Patches should be against kvm.git master branch. 72. Patches should be against kvm.git master branch.
8 8
93. If the patch introduces or modifies a new userspace API: 93. If the patch introduces or modifies a new userspace API:
10 - the API must be documented in Documentation/virtual/kvm/api.txt 10 - the API must be documented in Documentation/virt/kvm/api.txt
11 - the API must be discoverable using KVM_CHECK_EXTENSION 11 - the API must be discoverable using KVM_CHECK_EXTENSION
12 12
134. New state must include support for save/restore. 134. New state must include support for save/restore.
diff --git a/Documentation/virtual/kvm/s390-diag.txt b/Documentation/virt/kvm/s390-diag.txt
index 7c52e5f8b210..7c52e5f8b210 100644
--- a/Documentation/virtual/kvm/s390-diag.txt
+++ b/Documentation/virt/kvm/s390-diag.txt
diff --git a/Documentation/virtual/kvm/timekeeping.txt b/Documentation/virt/kvm/timekeeping.txt
index 76808a17ad84..76808a17ad84 100644
--- a/Documentation/virtual/kvm/timekeeping.txt
+++ b/Documentation/virt/kvm/timekeeping.txt
diff --git a/Documentation/virtual/kvm/vcpu-requests.rst b/Documentation/virt/kvm/vcpu-requests.rst
index 5feb3706a7ae..5feb3706a7ae 100644
--- a/Documentation/virtual/kvm/vcpu-requests.rst
+++ b/Documentation/virt/kvm/vcpu-requests.rst
diff --git a/Documentation/virtual/paravirt_ops.rst b/Documentation/virt/paravirt_ops.rst
index 6b789d27cead..6b789d27cead 100644
--- a/Documentation/virtual/paravirt_ops.rst
+++ b/Documentation/virt/paravirt_ops.rst
diff --git a/Documentation/virtual/uml/UserModeLinux-HOWTO.txt b/Documentation/virt/uml/UserModeLinux-HOWTO.txt
index 87b80f589e1c..87b80f589e1c 100644
--- a/Documentation/virtual/uml/UserModeLinux-HOWTO.txt
+++ b/Documentation/virt/uml/UserModeLinux-HOWTO.txt
diff --git a/Documentation/vm/conf.py b/Documentation/vm/conf.py
deleted file mode 100644
index 3b0b601af558..000000000000
--- a/Documentation/vm/conf.py
+++ /dev/null
@@ -1,10 +0,0 @@
1# -*- coding: utf-8; mode: python -*-
2
3project = "Linux Memory Management Documentation"
4
5tags.add("subproject")
6
7latex_documents = [
8 ('index', 'memory-management.tex', project,
9 'The kernel development community', 'manual'),
10]
diff --git a/Documentation/vm/hmm.rst b/Documentation/vm/hmm.rst
index 7d90964abbb0..710ce1c701bf 100644
--- a/Documentation/vm/hmm.rst
+++ b/Documentation/vm/hmm.rst
@@ -237,7 +237,7 @@ The usage pattern is::
237 ret = hmm_range_snapshot(&range); 237 ret = hmm_range_snapshot(&range);
238 if (ret) { 238 if (ret) {
239 up_read(&mm->mmap_sem); 239 up_read(&mm->mmap_sem);
240 if (ret == -EAGAIN) { 240 if (ret == -EBUSY) {
241 /* 241 /*
242 * No need to check hmm_range_wait_until_valid() return value 242 * No need to check hmm_range_wait_until_valid() return value
243 * on retry we will get proper error with hmm_range_snapshot() 243 * on retry we will get proper error with hmm_range_snapshot()
diff --git a/Documentation/watchdog/hpwdt.rst b/Documentation/watchdog/hpwdt.rst
index c165d92cfd12..c824cd7f6e32 100644
--- a/Documentation/watchdog/hpwdt.rst
+++ b/Documentation/watchdog/hpwdt.rst
@@ -63,7 +63,7 @@ Last reviewed: 08/20/2018
63 and loop forever. This is generally not what a watchdog user wants. 63 and loop forever. This is generally not what a watchdog user wants.
64 64
65 For those wishing to learn more please see: 65 For those wishing to learn more please see:
66 Documentation/kdump/kdump.rst 66 Documentation/admin-guide/kdump/kdump.rst
67 Documentation/admin-guide/kernel-parameters.txt (panic=) 67 Documentation/admin-guide/kernel-parameters.txt (panic=)
68 Your Linux Distribution specific documentation. 68 Your Linux Distribution specific documentation.
69 69
diff --git a/Documentation/x86/conf.py b/Documentation/x86/conf.py
deleted file mode 100644
index 33c5c3142e20..000000000000
--- a/Documentation/x86/conf.py
+++ /dev/null
@@ -1,10 +0,0 @@
1# -*- coding: utf-8; mode: python -*-
2
3project = "X86 architecture specific documentation"
4
5tags.add("subproject")
6
7latex_documents = [
8 ('index', 'x86.tex', project,
9 'The kernel development community', 'manual'),
10]
diff --git a/MAINTAINERS b/MAINTAINERS
index 783569e3c4b4..e7a47b5210fd 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -183,7 +183,7 @@ M: Realtek linux nic maintainers <nic_swsd@realtek.com>
183M: Heiner Kallweit <hkallweit1@gmail.com> 183M: Heiner Kallweit <hkallweit1@gmail.com>
184L: netdev@vger.kernel.org 184L: netdev@vger.kernel.org
185S: Maintained 185S: Maintained
186F: drivers/net/ethernet/realtek/r8169.c 186F: drivers/net/ethernet/realtek/r8169*
187 187
1888250/16?50 (AND CLONE UARTS) SERIAL DRIVER 1888250/16?50 (AND CLONE UARTS) SERIAL DRIVER
189M: Greg Kroah-Hartman <gregkh@linuxfoundation.org> 189M: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
@@ -683,7 +683,7 @@ S: Maintained
683F: drivers/crypto/sunxi-ss/ 683F: drivers/crypto/sunxi-ss/
684 684
685ALLWINNER VPU DRIVER 685ALLWINNER VPU DRIVER
686M: Maxime Ripard <maxime.ripard@bootlin.com> 686M: Maxime Ripard <mripard@kernel.org>
687M: Paul Kocialkowski <paul.kocialkowski@bootlin.com> 687M: Paul Kocialkowski <paul.kocialkowski@bootlin.com>
688L: linux-media@vger.kernel.org 688L: linux-media@vger.kernel.org
689S: Maintained 689S: Maintained
@@ -899,7 +899,7 @@ L: linux-iio@vger.kernel.org
899W: http://ez.analog.com/community/linux-device-drivers 899W: http://ez.analog.com/community/linux-device-drivers
900S: Supported 900S: Supported
901F: drivers/iio/adc/ad7124.c 901F: drivers/iio/adc/ad7124.c
902F: Documentation/devicetree/bindings/iio/adc/adi,ad7124.txt 902F: Documentation/devicetree/bindings/iio/adc/adi,ad7124.yaml
903 903
904ANALOG DEVICES INC AD7606 DRIVER 904ANALOG DEVICES INC AD7606 DRIVER
905M: Stefan Popa <stefan.popa@analog.com> 905M: Stefan Popa <stefan.popa@analog.com>
@@ -1194,7 +1194,7 @@ F: include/uapi/linux/if_arcnet.h
1194 1194
1195ARM ARCHITECTED TIMER DRIVER 1195ARM ARCHITECTED TIMER DRIVER
1196M: Mark Rutland <mark.rutland@arm.com> 1196M: Mark Rutland <mark.rutland@arm.com>
1197M: Marc Zyngier <marc.zyngier@arm.com> 1197M: Marc Zyngier <maz@kernel.org>
1198L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) 1198L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
1199S: Maintained 1199S: Maintained
1200F: arch/arm/include/asm/arch_timer.h 1200F: arch/arm/include/asm/arch_timer.h
@@ -1408,7 +1408,7 @@ S: Maintained
1408F: drivers/clk/sunxi/ 1408F: drivers/clk/sunxi/
1409 1409
1410ARM/Allwinner sunXi SoC support 1410ARM/Allwinner sunXi SoC support
1411M: Maxime Ripard <maxime.ripard@bootlin.com> 1411M: Maxime Ripard <mripard@kernel.org>
1412M: Chen-Yu Tsai <wens@csie.org> 1412M: Chen-Yu Tsai <wens@csie.org>
1413L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) 1413L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
1414S: Maintained 1414S: Maintained
@@ -2155,10 +2155,12 @@ F: Documentation/devicetree/bindings/arm/realtek.txt
2155 2155
2156ARM/RENESAS ARM64 ARCHITECTURE 2156ARM/RENESAS ARM64 ARCHITECTURE
2157M: Simon Horman <horms@verge.net.au> 2157M: Simon Horman <horms@verge.net.au>
2158M: Geert Uytterhoeven <geert+renesas@glider.be>
2158M: Magnus Damm <magnus.damm@gmail.com> 2159M: Magnus Damm <magnus.damm@gmail.com>
2159L: linux-renesas-soc@vger.kernel.org 2160L: linux-renesas-soc@vger.kernel.org
2160Q: http://patchwork.kernel.org/project/linux-renesas-soc/list/ 2161Q: http://patchwork.kernel.org/project/linux-renesas-soc/list/
2161T: git git://git.kernel.org/pub/scm/linux/kernel/git/horms/renesas.git next 2162T: git git://git.kernel.org/pub/scm/linux/kernel/git/horms/renesas.git next
2163T: git git://git.kernel.org/pub/scm/linux/kernel/git/geert/renesas-devel.git next
2162S: Supported 2164S: Supported
2163F: arch/arm64/boot/dts/renesas/ 2165F: arch/arm64/boot/dts/renesas/
2164F: Documentation/devicetree/bindings/arm/renesas.yaml 2166F: Documentation/devicetree/bindings/arm/renesas.yaml
@@ -2269,10 +2271,12 @@ F: drivers/media/platform/s5p-mfc/
2269 2271
2270ARM/SHMOBILE ARM ARCHITECTURE 2272ARM/SHMOBILE ARM ARCHITECTURE
2271M: Simon Horman <horms@verge.net.au> 2273M: Simon Horman <horms@verge.net.au>
2274M: Geert Uytterhoeven <geert+renesas@glider.be>
2272M: Magnus Damm <magnus.damm@gmail.com> 2275M: Magnus Damm <magnus.damm@gmail.com>
2273L: linux-renesas-soc@vger.kernel.org 2276L: linux-renesas-soc@vger.kernel.org
2274Q: http://patchwork.kernel.org/project/linux-renesas-soc/list/ 2277Q: http://patchwork.kernel.org/project/linux-renesas-soc/list/
2275T: git git://git.kernel.org/pub/scm/linux/kernel/git/horms/renesas.git next 2278T: git git://git.kernel.org/pub/scm/linux/kernel/git/horms/renesas.git next
2279T: git git://git.kernel.org/pub/scm/linux/kernel/git/geert/renesas-devel.git next
2276S: Supported 2280S: Supported
2277F: arch/arm/boot/dts/emev2* 2281F: arch/arm/boot/dts/emev2*
2278F: arch/arm/boot/dts/gr-peach* 2282F: arch/arm/boot/dts/gr-peach*
@@ -3573,7 +3577,7 @@ F: Documentation/filesystems/caching/cachefiles.txt
3573F: fs/cachefiles/ 3577F: fs/cachefiles/
3574 3578
3575CADENCE MIPI-CSI2 BRIDGES 3579CADENCE MIPI-CSI2 BRIDGES
3576M: Maxime Ripard <maxime.ripard@bootlin.com> 3580M: Maxime Ripard <mripard@kernel.org>
3577L: linux-media@vger.kernel.org 3581L: linux-media@vger.kernel.org
3578S: Maintained 3582S: Maintained
3579F: Documentation/devicetree/bindings/media/cdns,*.txt 3583F: Documentation/devicetree/bindings/media/cdns,*.txt
@@ -4190,7 +4194,7 @@ M: Jens Axboe <axboe@kernel.dk>
4190L: cgroups@vger.kernel.org 4194L: cgroups@vger.kernel.org
4191L: linux-block@vger.kernel.org 4195L: linux-block@vger.kernel.org
4192T: git git://git.kernel.dk/linux-block 4196T: git git://git.kernel.dk/linux-block
4193F: Documentation/cgroup-v1/blkio-controller.rst 4197F: Documentation/admin-guide/cgroup-v1/blkio-controller.rst
4194F: block/blk-cgroup.c 4198F: block/blk-cgroup.c
4195F: include/linux/blk-cgroup.h 4199F: include/linux/blk-cgroup.h
4196F: block/blk-throttle.c 4200F: block/blk-throttle.c
@@ -4469,7 +4473,7 @@ F: arch/powerpc/platforms/powernv/pci-cxl.c
4469F: drivers/misc/cxl/ 4473F: drivers/misc/cxl/
4470F: include/misc/cxl* 4474F: include/misc/cxl*
4471F: include/uapi/misc/cxl.h 4475F: include/uapi/misc/cxl.h
4472F: Documentation/powerpc/cxl.txt 4476F: Documentation/powerpc/cxl.rst
4473F: Documentation/ABI/testing/sysfs-class-cxl 4477F: Documentation/ABI/testing/sysfs-class-cxl
4474 4478
4475CXLFLASH (IBM Coherent Accelerator Processor Interface CAPI Flash) SCSI DRIVER 4479CXLFLASH (IBM Coherent Accelerator Processor Interface CAPI Flash) SCSI DRIVER
@@ -4480,7 +4484,7 @@ L: linux-scsi@vger.kernel.org
4480S: Supported 4484S: Supported
4481F: drivers/scsi/cxlflash/ 4485F: drivers/scsi/cxlflash/
4482F: include/uapi/scsi/cxlflash_ioctl.h 4486F: include/uapi/scsi/cxlflash_ioctl.h
4483F: Documentation/powerpc/cxlflash.txt 4487F: Documentation/powerpc/cxlflash.rst
4484 4488
4485CYBERPRO FB DRIVER 4489CYBERPRO FB DRIVER
4486M: Russell King <linux@armlinux.org.uk> 4490M: Russell King <linux@armlinux.org.uk>
@@ -5291,7 +5295,7 @@ F: include/linux/vga*
5291 5295
5292DRM DRIVERS AND MISC GPU PATCHES 5296DRM DRIVERS AND MISC GPU PATCHES
5293M: Maarten Lankhorst <maarten.lankhorst@linux.intel.com> 5297M: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
5294M: Maxime Ripard <maxime.ripard@bootlin.com> 5298M: Maxime Ripard <mripard@kernel.org>
5295M: Sean Paul <sean@poorly.run> 5299M: Sean Paul <sean@poorly.run>
5296W: https://01.org/linuxgraphics/gfx-docs/maintainer-tools/drm-misc.html 5300W: https://01.org/linuxgraphics/gfx-docs/maintainer-tools/drm-misc.html
5297S: Maintained 5301S: Maintained
@@ -5304,7 +5308,7 @@ F: include/uapi/drm/drm*
5304F: include/linux/vga* 5308F: include/linux/vga*
5305 5309
5306DRM DRIVERS FOR ALLWINNER A10 5310DRM DRIVERS FOR ALLWINNER A10
5307M: Maxime Ripard <maxime.ripard@bootlin.com> 5311M: Maxime Ripard <mripard@kernel.org>
5308L: dri-devel@lists.freedesktop.org 5312L: dri-devel@lists.freedesktop.org
5309S: Supported 5313S: Supported
5310F: drivers/gpu/drm/sun4i/ 5314F: drivers/gpu/drm/sun4i/
@@ -6061,7 +6065,7 @@ M: Florian Fainelli <f.fainelli@gmail.com>
6061M: Heiner Kallweit <hkallweit1@gmail.com> 6065M: Heiner Kallweit <hkallweit1@gmail.com>
6062L: netdev@vger.kernel.org 6066L: netdev@vger.kernel.org
6063S: Maintained 6067S: Maintained
6064F: Documentation/ABI/testing/sysfs-bus-mdio 6068F: Documentation/ABI/testing/sysfs-class-net-phydev
6065F: Documentation/devicetree/bindings/net/ethernet-phy.yaml 6069F: Documentation/devicetree/bindings/net/ethernet-phy.yaml
6066F: Documentation/devicetree/bindings/net/mdio* 6070F: Documentation/devicetree/bindings/net/mdio*
6067F: Documentation/networking/phy.rst 6071F: Documentation/networking/phy.rst
@@ -6322,7 +6326,8 @@ F: Documentation/devicetree/bindings/counter/ftm-quaddec.txt
6322F: drivers/counter/ftm-quaddec.c 6326F: drivers/counter/ftm-quaddec.c
6323 6327
6324FLOPPY DRIVER 6328FLOPPY DRIVER
6325S: Orphan 6329M: Denis Efremov <efremov@linux.com>
6330S: Odd Fixes
6326L: linux-block@vger.kernel.org 6331L: linux-block@vger.kernel.org
6327F: drivers/block/floppy.c 6332F: drivers/block/floppy.c
6328 6333
@@ -6339,7 +6344,7 @@ FPGA MANAGER FRAMEWORK
6339M: Moritz Fischer <mdf@kernel.org> 6344M: Moritz Fischer <mdf@kernel.org>
6340L: linux-fpga@vger.kernel.org 6345L: linux-fpga@vger.kernel.org
6341S: Maintained 6346S: Maintained
6342T: git git://git.kernel.org/pub/scm/linux/kernel/git/atull/linux-fpga.git 6347T: git git://git.kernel.org/pub/scm/linux/kernel/git/mdf/linux-fpga.git
6343Q: http://patchwork.kernel.org/project/linux-fpga/list/ 6348Q: http://patchwork.kernel.org/project/linux-fpga/list/
6344F: Documentation/fpga/ 6349F: Documentation/fpga/
6345F: Documentation/driver-api/fpga/ 6350F: Documentation/driver-api/fpga/
@@ -6372,7 +6377,7 @@ FRAMEBUFFER LAYER
6372M: Bartlomiej Zolnierkiewicz <b.zolnierkie@samsung.com> 6377M: Bartlomiej Zolnierkiewicz <b.zolnierkie@samsung.com>
6373L: dri-devel@lists.freedesktop.org 6378L: dri-devel@lists.freedesktop.org
6374L: linux-fbdev@vger.kernel.org 6379L: linux-fbdev@vger.kernel.org
6375T: git git://github.com/bzolnier/linux.git 6380T: git git://anongit.freedesktop.org/drm/drm-misc
6376Q: http://patchwork.kernel.org/project/linux-fbdev/list/ 6381Q: http://patchwork.kernel.org/project/linux-fbdev/list/
6377S: Maintained 6382S: Maintained
6378F: Documentation/fb/ 6383F: Documentation/fb/
@@ -6436,6 +6441,14 @@ S: Maintained
6436F: drivers/perf/fsl_imx8_ddr_perf.c 6441F: drivers/perf/fsl_imx8_ddr_perf.c
6437F: Documentation/devicetree/bindings/perf/fsl-imx-ddr.txt 6442F: Documentation/devicetree/bindings/perf/fsl-imx-ddr.txt
6438 6443
6444FREESCALE IMX I2C DRIVER
6445M: Oleksij Rempel <o.rempel@pengutronix.de>
6446R: Pengutronix Kernel Team <kernel@pengutronix.de>
6447L: linux-i2c@vger.kernel.org
6448S: Maintained
6449F: drivers/i2c/busses/i2c-imx.c
6450F: Documentation/devicetree/bindings/i2c/i2c-imx.txt
6451
6439FREESCALE IMX LPI2C DRIVER 6452FREESCALE IMX LPI2C DRIVER
6440M: Dong Aisheng <aisheng.dong@nxp.com> 6453M: Dong Aisheng <aisheng.dong@nxp.com>
6441L: linux-i2c@vger.kernel.org 6454L: linux-i2c@vger.kernel.org
@@ -6822,13 +6835,6 @@ F: Documentation/filesystems/gfs2*.txt
6822F: fs/gfs2/ 6835F: fs/gfs2/
6823F: include/uapi/linux/gfs2_ondisk.h 6836F: include/uapi/linux/gfs2_ondisk.h
6824 6837
6825GIGASET ISDN DRIVERS
6826M: Paul Bolle <pebolle@tiscali.nl>
6827L: gigaset307x-common@lists.sourceforge.net
6828W: http://gigaset307x.sourceforge.net/
6829S: Odd Fixes
6830F: drivers/staging/isdn/gigaset/
6831
6832GNSS SUBSYSTEM 6838GNSS SUBSYSTEM
6833M: Johan Hovold <johan@kernel.org> 6839M: Johan Hovold <johan@kernel.org>
6834T: git git://git.kernel.org/pub/scm/linux/kernel/git/johan/gnss.git 6840T: git git://git.kernel.org/pub/scm/linux/kernel/git/johan/gnss.git
@@ -6856,7 +6862,7 @@ R: Sagi Shahar <sagis@google.com>
6856R: Jon Olson <jonolson@google.com> 6862R: Jon Olson <jonolson@google.com>
6857L: netdev@vger.kernel.org 6863L: netdev@vger.kernel.org
6858S: Supported 6864S: Supported
6859F: Documentation/networking/device_drivers/google/gve.txt 6865F: Documentation/networking/device_drivers/google/gve.rst
6860F: drivers/net/ethernet/google 6866F: drivers/net/ethernet/google
6861 6867
6862GPD POCKET FAN DRIVER 6868GPD POCKET FAN DRIVER
@@ -7454,7 +7460,7 @@ F: drivers/net/hyperv/
7454F: drivers/scsi/storvsc_drv.c 7460F: drivers/scsi/storvsc_drv.c
7455F: drivers/uio/uio_hv_generic.c 7461F: drivers/uio/uio_hv_generic.c
7456F: drivers/video/fbdev/hyperv_fb.c 7462F: drivers/video/fbdev/hyperv_fb.c
7457F: drivers/iommu/hyperv_iommu.c 7463F: drivers/iommu/hyperv-iommu.c
7458F: net/vmw_vsock/hyperv_transport.c 7464F: net/vmw_vsock/hyperv_transport.c
7459F: include/clocksource/hyperv_timer.h 7465F: include/clocksource/hyperv_timer.h
7460F: include/linux/hyperv.h 7466F: include/linux/hyperv.h
@@ -7507,7 +7513,7 @@ I2C MV64XXX MARVELL AND ALLWINNER DRIVER
7507M: Gregory CLEMENT <gregory.clement@bootlin.com> 7513M: Gregory CLEMENT <gregory.clement@bootlin.com>
7508L: linux-i2c@vger.kernel.org 7514L: linux-i2c@vger.kernel.org
7509S: Maintained 7515S: Maintained
7510F: Documentation/devicetree/bindings/i2c/i2c-mv64xxx.txt 7516F: Documentation/devicetree/bindings/i2c/marvell,mv64xxx-i2c.yaml
7511F: drivers/i2c/busses/i2c-mv64xxx.c 7517F: drivers/i2c/busses/i2c-mv64xxx.c
7512 7518
7513I2C OVER PARALLEL PORT 7519I2C OVER PARALLEL PORT
@@ -8044,6 +8050,7 @@ S: Maintained
8044F: drivers/video/fbdev/i810/ 8050F: drivers/video/fbdev/i810/
8045 8051
8046INTEL ASoC DRIVERS 8052INTEL ASoC DRIVERS
8053M: Cezary Rojewski <cezary.rojewski@intel.com>
8047M: Pierre-Louis Bossart <pierre-louis.bossart@linux.intel.com> 8054M: Pierre-Louis Bossart <pierre-louis.bossart@linux.intel.com>
8048M: Liam Girdwood <liam.r.girdwood@linux.intel.com> 8055M: Liam Girdwood <liam.r.girdwood@linux.intel.com>
8049M: Jie Yang <yang.jie@linux.intel.com> 8056M: Jie Yang <yang.jie@linux.intel.com>
@@ -8065,6 +8072,13 @@ T: git git://git.code.sf.net/p/intel-sas/isci
8065S: Supported 8072S: Supported
8066F: drivers/scsi/isci/ 8073F: drivers/scsi/isci/
8067 8074
8075INTEL CPU family model numbers
8076M: Tony Luck <tony.luck@intel.com>
8077M: x86@kernel.org
8078L: linux-kernel@vger.kernel.org
8079S: Supported
8080F: arch/x86/include/asm/intel-family.h
8081
8068INTEL DRM DRIVERS (excluding Poulsbo, Moorestown and derivative chipsets) 8082INTEL DRM DRIVERS (excluding Poulsbo, Moorestown and derivative chipsets)
8069M: Jani Nikula <jani.nikula@linux.intel.com> 8083M: Jani Nikula <jani.nikula@linux.intel.com>
8070M: Joonas Lahtinen <joonas.lahtinen@linux.intel.com> 8084M: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
@@ -8416,7 +8430,6 @@ L: linux-xfs@vger.kernel.org
8416L: linux-fsdevel@vger.kernel.org 8430L: linux-fsdevel@vger.kernel.org
8417T: git git://git.kernel.org/pub/scm/fs/xfs/xfs-linux.git 8431T: git git://git.kernel.org/pub/scm/fs/xfs/xfs-linux.git
8418S: Supported 8432S: Supported
8419F: fs/iomap.c
8420F: fs/iomap/ 8433F: fs/iomap/
8421F: include/linux/iomap.h 8434F: include/linux/iomap.h
8422 8435
@@ -8441,11 +8454,6 @@ S: Maintained
8441F: fs/io_uring.c 8454F: fs/io_uring.c
8442F: include/uapi/linux/io_uring.h 8455F: include/uapi/linux/io_uring.h
8443 8456
8444IP MASQUERADING
8445M: Juanjo Ciarlante <jjciarla@raiz.uncu.edu.ar>
8446S: Maintained
8447F: net/ipv4/netfilter/ipt_MASQUERADE.c
8448
8449IPMI SUBSYSTEM 8457IPMI SUBSYSTEM
8450M: Corey Minyard <minyard@acm.org> 8458M: Corey Minyard <minyard@acm.org>
8451L: openipmi-developer@lists.sourceforge.net (moderated for non-subscribers) 8459L: openipmi-developer@lists.sourceforge.net (moderated for non-subscribers)
@@ -8490,7 +8498,7 @@ S: Obsolete
8490F: include/uapi/linux/ipx.h 8498F: include/uapi/linux/ipx.h
8491 8499
8492IRQ DOMAINS (IRQ NUMBER MAPPING LIBRARY) 8500IRQ DOMAINS (IRQ NUMBER MAPPING LIBRARY)
8493M: Marc Zyngier <marc.zyngier@arm.com> 8501M: Marc Zyngier <maz@kernel.org>
8494S: Maintained 8502S: Maintained
8495T: git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git irq/core 8503T: git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git irq/core
8496F: Documentation/IRQ-domain.txt 8504F: Documentation/IRQ-domain.txt
@@ -8508,7 +8516,7 @@ F: kernel/irq/
8508IRQCHIP DRIVERS 8516IRQCHIP DRIVERS
8509M: Thomas Gleixner <tglx@linutronix.de> 8517M: Thomas Gleixner <tglx@linutronix.de>
8510M: Jason Cooper <jason@lakedaemon.net> 8518M: Jason Cooper <jason@lakedaemon.net>
8511M: Marc Zyngier <marc.zyngier@arm.com> 8519M: Marc Zyngier <maz@kernel.org>
8512L: linux-kernel@vger.kernel.org 8520L: linux-kernel@vger.kernel.org
8513S: Maintained 8521S: Maintained
8514T: git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git irq/core 8522T: git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git irq/core
@@ -8808,7 +8816,7 @@ L: kvm@vger.kernel.org
8808W: http://www.linux-kvm.org 8816W: http://www.linux-kvm.org
8809T: git git://git.kernel.org/pub/scm/virt/kvm/kvm.git 8817T: git git://git.kernel.org/pub/scm/virt/kvm/kvm.git
8810S: Supported 8818S: Supported
8811F: Documentation/virtual/kvm/ 8819F: Documentation/virt/kvm/
8812F: include/trace/events/kvm.h 8820F: include/trace/events/kvm.h
8813F: include/uapi/asm-generic/kvm* 8821F: include/uapi/asm-generic/kvm*
8814F: include/uapi/linux/kvm* 8822F: include/uapi/linux/kvm*
@@ -8819,19 +8827,11 @@ F: virt/kvm/*
8819F: tools/kvm/ 8827F: tools/kvm/
8820F: tools/testing/selftests/kvm/ 8828F: tools/testing/selftests/kvm/
8821 8829
8822KERNEL VIRTUAL MACHINE FOR AMD-V (KVM/amd)
8823M: Joerg Roedel <joro@8bytes.org>
8824L: kvm@vger.kernel.org
8825W: http://www.linux-kvm.org/
8826S: Maintained
8827F: arch/x86/include/asm/svm.h
8828F: arch/x86/kvm/svm.c
8829
8830KERNEL VIRTUAL MACHINE FOR ARM/ARM64 (KVM/arm, KVM/arm64) 8830KERNEL VIRTUAL MACHINE FOR ARM/ARM64 (KVM/arm, KVM/arm64)
8831M: Marc Zyngier <marc.zyngier@arm.com> 8831M: Marc Zyngier <maz@kernel.org>
8832R: James Morse <james.morse@arm.com> 8832R: James Morse <james.morse@arm.com>
8833R: Julien Thierry <julien.thierry@arm.com> 8833R: Julien Thierry <julien.thierry.kdev@gmail.com>
8834R: Suzuki K Pouloze <suzuki.poulose@arm.com> 8834R: Suzuki K Poulose <suzuki.poulose@arm.com>
8835L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) 8835L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
8836L: kvmarm@lists.cs.columbia.edu 8836L: kvmarm@lists.cs.columbia.edu
8837T: git git://git.kernel.org/pub/scm/linux/kernel/git/kvmarm/kvmarm.git 8837T: git git://git.kernel.org/pub/scm/linux/kernel/git/kvmarm/kvmarm.git
@@ -8869,7 +8869,7 @@ M: Christian Borntraeger <borntraeger@de.ibm.com>
8869M: Janosch Frank <frankja@linux.ibm.com> 8869M: Janosch Frank <frankja@linux.ibm.com>
8870R: David Hildenbrand <david@redhat.com> 8870R: David Hildenbrand <david@redhat.com>
8871R: Cornelia Huck <cohuck@redhat.com> 8871R: Cornelia Huck <cohuck@redhat.com>
8872L: linux-s390@vger.kernel.org 8872L: kvm@vger.kernel.org
8873W: http://www.ibm.com/developerworks/linux/linux390/ 8873W: http://www.ibm.com/developerworks/linux/linux390/
8874T: git git://git.kernel.org/pub/scm/linux/kernel/git/kvms390/linux.git 8874T: git git://git.kernel.org/pub/scm/linux/kernel/git/kvms390/linux.git
8875S: Supported 8875S: Supported
@@ -8884,6 +8884,11 @@ F: tools/testing/selftests/kvm/*/s390x/
8884KERNEL VIRTUAL MACHINE FOR X86 (KVM/x86) 8884KERNEL VIRTUAL MACHINE FOR X86 (KVM/x86)
8885M: Paolo Bonzini <pbonzini@redhat.com> 8885M: Paolo Bonzini <pbonzini@redhat.com>
8886M: Radim KrÄmář <rkrcmar@redhat.com> 8886M: Radim KrÄmář <rkrcmar@redhat.com>
8887R: Sean Christopherson <sean.j.christopherson@intel.com>
8888R: Vitaly Kuznetsov <vkuznets@redhat.com>
8889R: Wanpeng Li <wanpengli@tencent.com>
8890R: Jim Mattson <jmattson@google.com>
8891R: Joerg Roedel <joro@8bytes.org>
8887L: kvm@vger.kernel.org 8892L: kvm@vger.kernel.org
8888W: http://www.linux-kvm.org 8893W: http://www.linux-kvm.org
8889T: git git://git.kernel.org/pub/scm/virt/kvm/kvm.git 8894T: git git://git.kernel.org/pub/scm/virt/kvm/kvm.git
@@ -8891,8 +8896,12 @@ S: Supported
8891F: arch/x86/kvm/ 8896F: arch/x86/kvm/
8892F: arch/x86/kvm/*/ 8897F: arch/x86/kvm/*/
8893F: arch/x86/include/uapi/asm/kvm* 8898F: arch/x86/include/uapi/asm/kvm*
8899F: arch/x86/include/uapi/asm/vmx.h
8900F: arch/x86/include/uapi/asm/svm.h
8894F: arch/x86/include/asm/kvm* 8901F: arch/x86/include/asm/kvm*
8895F: arch/x86/include/asm/pvclock-abi.h 8902F: arch/x86/include/asm/pvclock-abi.h
8903F: arch/x86/include/asm/svm.h
8904F: arch/x86/include/asm/vmx.h
8896F: arch/x86/kernel/kvm.c 8905F: arch/x86/kernel/kvm.c
8897F: arch/x86/kernel/kvmclock.c 8906F: arch/x86/kernel/kvmclock.c
8898 8907
@@ -9220,6 +9229,18 @@ F: include/linux/nd.h
9220F: include/linux/libnvdimm.h 9229F: include/linux/libnvdimm.h
9221F: include/uapi/linux/ndctl.h 9230F: include/uapi/linux/ndctl.h
9222 9231
9232LICENSES and SPDX stuff
9233M: Thomas Gleixner <tglx@linutronix.de>
9234M: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
9235L: linux-spdx@vger.kernel.org
9236S: Maintained
9237T: git git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/spdx.git
9238F: COPYING
9239F: Documentation/process/license-rules.rst
9240F: LICENSES/
9241F: scripts/spdxcheck-test.sh
9242F: scripts/spdxcheck.py
9243
9223LIGHTNVM PLATFORM SUPPORT 9244LIGHTNVM PLATFORM SUPPORT
9224M: Matias Bjorling <mb@lightnvm.io> 9245M: Matias Bjorling <mb@lightnvm.io>
9225W: http://github/OpenChannelSSD 9246W: http://github/OpenChannelSSD
@@ -11072,7 +11093,7 @@ NET_FAILOVER MODULE
11072M: Sridhar Samudrala <sridhar.samudrala@intel.com> 11093M: Sridhar Samudrala <sridhar.samudrala@intel.com>
11073L: netdev@vger.kernel.org 11094L: netdev@vger.kernel.org
11074S: Supported 11095S: Supported
11075F: driver/net/net_failover.c 11096F: drivers/net/net_failover.c
11076F: include/net/net_failover.h 11097F: include/net/net_failover.h
11077F: Documentation/networking/net_failover.rst 11098F: Documentation/networking/net_failover.rst
11078 11099
@@ -11144,6 +11165,7 @@ L: netdev@vger.kernel.org
11144S: Maintained 11165S: Maintained
11145W: https://fedorahosted.org/dropwatch/ 11166W: https://fedorahosted.org/dropwatch/
11146F: net/core/drop_monitor.c 11167F: net/core/drop_monitor.c
11168F: include/uapi/linux/net_dropmon.h
11147 11169
11148NETWORKING DRIVERS 11170NETWORKING DRIVERS
11149M: "David S. Miller" <davem@davemloft.net> 11171M: "David S. Miller" <davem@davemloft.net>
@@ -11282,6 +11304,7 @@ M: Aviad Yehezkel <aviadye@mellanox.com>
11282M: Dave Watson <davejwatson@fb.com> 11304M: Dave Watson <davejwatson@fb.com>
11283M: John Fastabend <john.fastabend@gmail.com> 11305M: John Fastabend <john.fastabend@gmail.com>
11284M: Daniel Borkmann <daniel@iogearbox.net> 11306M: Daniel Borkmann <daniel@iogearbox.net>
11307M: Jakub Kicinski <jakub.kicinski@netronome.com>
11285L: netdev@vger.kernel.org 11308L: netdev@vger.kernel.org
11286S: Maintained 11309S: Maintained
11287F: net/tls/* 11310F: net/tls/*
@@ -12137,7 +12160,7 @@ M: Thomas Hellstrom <thellstrom@vmware.com>
12137M: "VMware, Inc." <pv-drivers@vmware.com> 12160M: "VMware, Inc." <pv-drivers@vmware.com>
12138L: virtualization@lists.linux-foundation.org 12161L: virtualization@lists.linux-foundation.org
12139S: Supported 12162S: Supported
12140F: Documentation/virtual/paravirt_ops.txt 12163F: Documentation/virt/paravirt_ops.rst
12141F: arch/*/kernel/paravirt* 12164F: arch/*/kernel/paravirt*
12142F: arch/*/include/asm/paravirt*.h 12165F: arch/*/include/asm/paravirt*.h
12143F: include/linux/hypervisor.h 12166F: include/linux/hypervisor.h
@@ -12394,7 +12417,7 @@ F: Documentation/PCI/pci-error-recovery.rst
12394F: drivers/pci/pcie/aer.c 12417F: drivers/pci/pcie/aer.c
12395F: drivers/pci/pcie/dpc.c 12418F: drivers/pci/pcie/dpc.c
12396F: drivers/pci/pcie/err.c 12419F: drivers/pci/pcie/err.c
12397F: Documentation/powerpc/eeh-pci-error-recovery.txt 12420F: Documentation/powerpc/eeh-pci-error-recovery.rst
12398F: arch/powerpc/kernel/eeh*.c 12421F: arch/powerpc/kernel/eeh*.c
12399F: arch/powerpc/platforms/*/eeh*.c 12422F: arch/powerpc/platforms/*/eeh*.c
12400F: arch/powerpc/include/*/eeh*.h 12423F: arch/powerpc/include/*/eeh*.h
@@ -13725,6 +13748,7 @@ F: drivers/mtd/nand/raw/r852.c
13725F: drivers/mtd/nand/raw/r852.h 13748F: drivers/mtd/nand/raw/r852.h
13726 13749
13727RISC-V ARCHITECTURE 13750RISC-V ARCHITECTURE
13751M: Paul Walmsley <paul.walmsley@sifive.com>
13728M: Palmer Dabbelt <palmer@sifive.com> 13752M: Palmer Dabbelt <palmer@sifive.com>
13729M: Albert Ou <aou@eecs.berkeley.edu> 13753M: Albert Ou <aou@eecs.berkeley.edu>
13730L: linux-riscv@lists.infradead.org 13754L: linux-riscv@lists.infradead.org
@@ -13947,7 +13971,6 @@ F: drivers/pci/hotplug/s390_pci_hpc.c
13947 13971
13948S390 VFIO-CCW DRIVER 13972S390 VFIO-CCW DRIVER
13949M: Cornelia Huck <cohuck@redhat.com> 13973M: Cornelia Huck <cohuck@redhat.com>
13950M: Farhan Ali <alifm@linux.ibm.com>
13951M: Eric Farman <farman@linux.ibm.com> 13974M: Eric Farman <farman@linux.ibm.com>
13952R: Halil Pasic <pasic@linux.ibm.com> 13975R: Halil Pasic <pasic@linux.ibm.com>
13953L: linux-s390@vger.kernel.org 13976L: linux-s390@vger.kernel.org
@@ -14016,6 +14039,12 @@ F: drivers/media/common/saa7146/
14016F: drivers/media/pci/saa7146/ 14039F: drivers/media/pci/saa7146/
14017F: include/media/drv-intf/saa7146* 14040F: include/media/drv-intf/saa7146*
14018 14041
14042SAFESETID SECURITY MODULE
14043M: Micah Morton <mortonm@chromium.org>
14044S: Supported
14045F: security/safesetid/
14046F: Documentation/admin-guide/LSM/SafeSetID.rst
14047
14019SAMSUNG AUDIO (ASoC) DRIVERS 14048SAMSUNG AUDIO (ASoC) DRIVERS
14020M: Krzysztof Kozlowski <krzk@kernel.org> 14049M: Krzysztof Kozlowski <krzk@kernel.org>
14021M: Sangbeom Kim <sbkim73@samsung.com> 14050M: Sangbeom Kim <sbkim73@samsung.com>
@@ -14456,6 +14485,7 @@ F: drivers/net/phy/phylink.c
14456F: drivers/net/phy/sfp* 14485F: drivers/net/phy/sfp*
14457F: include/linux/phylink.h 14486F: include/linux/phylink.h
14458F: include/linux/sfp.h 14487F: include/linux/sfp.h
14488K: phylink
14459 14489
14460SGI GRU DRIVER 14490SGI GRU DRIVER
14461M: Dimitri Sivanich <sivanich@sgi.com> 14491M: Dimitri Sivanich <sivanich@sgi.com>
@@ -14861,9 +14891,9 @@ F: include/linux/arm_sdei.h
14861F: include/uapi/linux/arm_sdei.h 14891F: include/uapi/linux/arm_sdei.h
14862 14892
14863SOFTWARE RAID (Multiple Disks) SUPPORT 14893SOFTWARE RAID (Multiple Disks) SUPPORT
14864M: Shaohua Li <shli@kernel.org> 14894M: Song Liu <song@kernel.org>
14865L: linux-raid@vger.kernel.org 14895L: linux-raid@vger.kernel.org
14866T: git git://git.kernel.org/pub/scm/linux/kernel/git/shli/md.git 14896T: git git://git.kernel.org/pub/scm/linux/kernel/git/song/md.git
14867S: Supported 14897S: Supported
14868F: drivers/md/Makefile 14898F: drivers/md/Makefile
14869F: drivers/md/Kconfig 14899F: drivers/md/Kconfig
@@ -16078,7 +16108,7 @@ S: Maintained
16078F: drivers/net/ethernet/ti/netcp* 16108F: drivers/net/ethernet/ti/netcp*
16079 16109
16080TI PCM3060 ASoC CODEC DRIVER 16110TI PCM3060 ASoC CODEC DRIVER
16081M: Kirill Marinushkin <kmarinushkin@birdec.tech> 16111M: Kirill Marinushkin <kmarinushkin@birdec.com>
16082L: alsa-devel@alsa-project.org (moderated for non-subscribers) 16112L: alsa-devel@alsa-project.org (moderated for non-subscribers)
16083S: Maintained 16113S: Maintained
16084F: Documentation/devicetree/bindings/sound/pcm3060.txt 16114F: Documentation/devicetree/bindings/sound/pcm3060.txt
@@ -16854,7 +16884,7 @@ W: http://user-mode-linux.sourceforge.net
16854Q: https://patchwork.ozlabs.org/project/linux-um/list/ 16884Q: https://patchwork.ozlabs.org/project/linux-um/list/
16855T: git git://git.kernel.org/pub/scm/linux/kernel/git/rw/uml.git 16885T: git git://git.kernel.org/pub/scm/linux/kernel/git/rw/uml.git
16856S: Maintained 16886S: Maintained
16857F: Documentation/virtual/uml/ 16887F: Documentation/virt/uml/
16858F: arch/um/ 16888F: arch/um/
16859F: arch/x86/um/ 16889F: arch/x86/um/
16860F: fs/hostfs/ 16890F: fs/hostfs/
@@ -17123,7 +17153,7 @@ F: drivers/virtio/virtio_input.c
17123F: include/uapi/linux/virtio_input.h 17153F: include/uapi/linux/virtio_input.h
17124 17154
17125VIRTIO IOMMU DRIVER 17155VIRTIO IOMMU DRIVER
17126M: Jean-Philippe Brucker <jean-philippe.brucker@arm.com> 17156M: Jean-Philippe Brucker <jean-philippe@linaro.org>
17127L: virtualization@lists.linux-foundation.org 17157L: virtualization@lists.linux-foundation.org
17128S: Maintained 17158S: Maintained
17129F: drivers/iommu/virtio-iommu.c 17159F: drivers/iommu/virtio-iommu.c
@@ -17172,7 +17202,6 @@ F: drivers/vme/
17172F: include/linux/vme* 17202F: include/linux/vme*
17173 17203
17174VMWARE BALLOON DRIVER 17204VMWARE BALLOON DRIVER
17175M: Julien Freche <jfreche@vmware.com>
17176M: Nadav Amit <namit@vmware.com> 17205M: Nadav Amit <namit@vmware.com>
17177M: "VMware, Inc." <pv-drivers@vmware.com> 17206M: "VMware, Inc." <pv-drivers@vmware.com>
17178L: linux-kernel@vger.kernel.org 17207L: linux-kernel@vger.kernel.org
@@ -17555,7 +17584,6 @@ M: Jakub Kicinski <jakub.kicinski@netronome.com>
17555M: Jesper Dangaard Brouer <hawk@kernel.org> 17584M: Jesper Dangaard Brouer <hawk@kernel.org>
17556M: John Fastabend <john.fastabend@gmail.com> 17585M: John Fastabend <john.fastabend@gmail.com>
17557L: netdev@vger.kernel.org 17586L: netdev@vger.kernel.org
17558L: xdp-newbies@vger.kernel.org
17559L: bpf@vger.kernel.org 17587L: bpf@vger.kernel.org
17560S: Supported 17588S: Supported
17561F: net/core/xdp.c 17589F: net/core/xdp.c
diff --git a/Makefile b/Makefile
index 9be5834073f8..0cbe8717bdb3 100644
--- a/Makefile
+++ b/Makefile
@@ -2,7 +2,7 @@
2VERSION = 5 2VERSION = 5
3PATCHLEVEL = 3 3PATCHLEVEL = 3
4SUBLEVEL = 0 4SUBLEVEL = 0
5EXTRAVERSION = -rc1 5EXTRAVERSION = -rc7
6NAME = Bobtail Squid 6NAME = Bobtail Squid
7 7
8# *DOCUMENTATION* 8# *DOCUMENTATION*
@@ -419,6 +419,7 @@ NM = $(CROSS_COMPILE)nm
419STRIP = $(CROSS_COMPILE)strip 419STRIP = $(CROSS_COMPILE)strip
420OBJCOPY = $(CROSS_COMPILE)objcopy 420OBJCOPY = $(CROSS_COMPILE)objcopy
421OBJDUMP = $(CROSS_COMPILE)objdump 421OBJDUMP = $(CROSS_COMPILE)objdump
422OBJSIZE = $(CROSS_COMPILE)size
422PAHOLE = pahole 423PAHOLE = pahole
423LEX = flex 424LEX = flex
424YACC = bison 425YACC = bison
@@ -472,11 +473,12 @@ KBUILD_CFLAGS_MODULE := -DMODULE
472KBUILD_LDFLAGS_MODULE := -T $(srctree)/scripts/module-common.lds 473KBUILD_LDFLAGS_MODULE := -T $(srctree)/scripts/module-common.lds
473KBUILD_LDFLAGS := 474KBUILD_LDFLAGS :=
474GCC_PLUGINS_CFLAGS := 475GCC_PLUGINS_CFLAGS :=
476CLANG_FLAGS :=
475 477
476export ARCH SRCARCH CONFIG_SHELL HOSTCC KBUILD_HOSTCFLAGS CROSS_COMPILE AS LD CC 478export ARCH SRCARCH CONFIG_SHELL HOSTCC KBUILD_HOSTCFLAGS CROSS_COMPILE AS LD CC
477export CPP AR NM STRIP OBJCOPY OBJDUMP PAHOLE KBUILD_HOSTLDFLAGS KBUILD_HOSTLDLIBS 479export CPP AR NM STRIP OBJCOPY OBJDUMP OBJSIZE PAHOLE LEX YACC AWK INSTALLKERNEL
478export MAKE LEX YACC AWK INSTALLKERNEL PERL PYTHON PYTHON2 PYTHON3 UTS_MACHINE 480export PERL PYTHON PYTHON2 PYTHON3 CHECK CHECKFLAGS MAKE UTS_MACHINE HOSTCXX
479export HOSTCXX KBUILD_HOSTCXXFLAGS LDFLAGS_MODULE CHECK CHECKFLAGS 481export KBUILD_HOSTCXXFLAGS KBUILD_HOSTLDFLAGS KBUILD_HOSTLDLIBS LDFLAGS_MODULE
480 482
481export KBUILD_CPPFLAGS NOSTDINC_FLAGS LINUXINCLUDE OBJCOPYFLAGS KBUILD_LDFLAGS 483export KBUILD_CPPFLAGS NOSTDINC_FLAGS LINUXINCLUDE OBJCOPYFLAGS KBUILD_LDFLAGS
482export KBUILD_CFLAGS CFLAGS_KERNEL CFLAGS_MODULE 484export KBUILD_CFLAGS CFLAGS_KERNEL CFLAGS_MODULE
@@ -519,7 +521,7 @@ endif
519 521
520ifneq ($(shell $(CC) --version 2>&1 | head -n 1 | grep clang),) 522ifneq ($(shell $(CC) --version 2>&1 | head -n 1 | grep clang),)
521ifneq ($(CROSS_COMPILE),) 523ifneq ($(CROSS_COMPILE),)
522CLANG_FLAGS := --target=$(notdir $(CROSS_COMPILE:%-=%)) 524CLANG_FLAGS += --target=$(notdir $(CROSS_COMPILE:%-=%))
523GCC_TOOLCHAIN_DIR := $(dir $(shell which $(CROSS_COMPILE)elfedit)) 525GCC_TOOLCHAIN_DIR := $(dir $(shell which $(CROSS_COMPILE)elfedit))
524CLANG_FLAGS += --prefix=$(GCC_TOOLCHAIN_DIR) 526CLANG_FLAGS += --prefix=$(GCC_TOOLCHAIN_DIR)
525GCC_TOOLCHAIN := $(realpath $(GCC_TOOLCHAIN_DIR)/..) 527GCC_TOOLCHAIN := $(realpath $(GCC_TOOLCHAIN_DIR)/..)
@@ -843,6 +845,9 @@ NOSTDINC_FLAGS += -nostdinc -isystem $(shell $(CC) -print-file-name=include)
843# warn about C99 declaration after statement 845# warn about C99 declaration after statement
844KBUILD_CFLAGS += -Wdeclaration-after-statement 846KBUILD_CFLAGS += -Wdeclaration-after-statement
845 847
848# Warn about unmarked fall-throughs in switch statement.
849KBUILD_CFLAGS += $(call cc-option,-Wimplicit-fallthrough,)
850
846# Variable Length Arrays (VLAs) should not be used anywhere in the kernel 851# Variable Length Arrays (VLAs) should not be used anywhere in the kernel
847KBUILD_CFLAGS += -Wvla 852KBUILD_CFLAGS += -Wvla
848 853
@@ -998,6 +1003,8 @@ endif
998 1003
999PHONY += prepare0 1004PHONY += prepare0
1000 1005
1006export MODORDER := $(if $(KBUILD_EXTMOD),$(KBUILD_EXTMOD)/)modules.order
1007
1001ifeq ($(KBUILD_EXTMOD),) 1008ifeq ($(KBUILD_EXTMOD),)
1002core-y += kernel/ certs/ mm/ fs/ ipc/ security/ crypto/ block/ 1009core-y += kernel/ certs/ mm/ fs/ ipc/ security/ crypto/ block/
1003 1010
@@ -1767,13 +1774,22 @@ build-dir = $(patsubst %/,%,$(dir $(build-target)))
1767 $(Q)$(MAKE) $(build)=$(build-dir) $(build-target) 1774 $(Q)$(MAKE) $(build)=$(build-dir) $(build-target)
1768%.symtypes: prepare FORCE 1775%.symtypes: prepare FORCE
1769 $(Q)$(MAKE) $(build)=$(build-dir) $(build-target) 1776 $(Q)$(MAKE) $(build)=$(build-dir) $(build-target)
1777ifeq ($(KBUILD_EXTMOD),)
1778# For the single build of an in-tree module, use a temporary file to avoid
1779# the situation of modules_install installing an invalid modules.order.
1780%.ko: MODORDER := .modules.tmp
1781endif
1782%.ko: prepare FORCE
1783 $(Q)$(MAKE) $(build)=$(build-dir) $(build-target:.ko=.mod)
1784 $(Q)echo $(build-target) > $(MODORDER)
1785 $(Q)$(MAKE) -f $(srctree)/scripts/Makefile.modpost
1770 1786
1771# Modules 1787# Modules
1772PHONY += / 1788PHONY += /
1773/: ./ 1789/: ./
1774 1790
1775%/: prepare FORCE 1791%/: prepare FORCE
1776 $(Q)$(MAKE) KBUILD_MODULES=1 $(build)=$(build-dir) 1792 $(Q)$(MAKE) KBUILD_MODULES=1 $(build)=$(build-dir) need-modorder=1
1777 1793
1778# FIXME Should go into a make.lib or something 1794# FIXME Should go into a make.lib or something
1779# =========================================================================== 1795# ===========================================================================
diff --git a/arch/arc/boot/dts/Makefile b/arch/arc/boot/dts/Makefile
index a83c4f5e928b..8483a86c743d 100644
--- a/arch/arc/boot/dts/Makefile
+++ b/arch/arc/boot/dts/Makefile
@@ -12,3 +12,6 @@ dtb-y := $(builtindtb-y).dtb
12# for CONFIG_OF_ALL_DTBS test 12# for CONFIG_OF_ALL_DTBS test
13dtstree := $(srctree)/$(src) 13dtstree := $(srctree)/$(src)
14dtb- := $(patsubst $(dtstree)/%.dts,%.dtb, $(wildcard $(dtstree)/*.dts)) 14dtb- := $(patsubst $(dtstree)/%.dts,%.dtb, $(wildcard $(dtstree)/*.dts))
15
16# board-specific dtc flags
17DTC_FLAGS_hsdk += --pad 20
diff --git a/arch/arc/include/asm/entry-arcv2.h b/arch/arc/include/asm/entry-arcv2.h
index f5ae394ebe06..41b16f21beec 100644
--- a/arch/arc/include/asm/entry-arcv2.h
+++ b/arch/arc/include/asm/entry-arcv2.h
@@ -256,7 +256,7 @@
256 256
257.macro FAKE_RET_FROM_EXCPN 257.macro FAKE_RET_FROM_EXCPN
258 lr r9, [status32] 258 lr r9, [status32]
259 bic r9, r9, (STATUS_U_MASK|STATUS_DE_MASK|STATUS_AE_MASK) 259 bic r9, r9, STATUS_AE_MASK
260 or r9, r9, STATUS_IE_MASK 260 or r9, r9, STATUS_IE_MASK
261 kflag r9 261 kflag r9
262.endm 262.endm
diff --git a/arch/arc/include/asm/linkage.h b/arch/arc/include/asm/linkage.h
index a0eeb9f8f0a9..d9ee43c6b7db 100644
--- a/arch/arc/include/asm/linkage.h
+++ b/arch/arc/include/asm/linkage.h
@@ -62,15 +62,15 @@
62#else /* !__ASSEMBLY__ */ 62#else /* !__ASSEMBLY__ */
63 63
64#ifdef CONFIG_ARC_HAS_ICCM 64#ifdef CONFIG_ARC_HAS_ICCM
65#define __arcfp_code __attribute__((__section__(".text.arcfp"))) 65#define __arcfp_code __section(.text.arcfp)
66#else 66#else
67#define __arcfp_code __attribute__((__section__(".text"))) 67#define __arcfp_code __section(.text)
68#endif 68#endif
69 69
70#ifdef CONFIG_ARC_HAS_DCCM 70#ifdef CONFIG_ARC_HAS_DCCM
71#define __arcfp_data __attribute__((__section__(".data.arcfp"))) 71#define __arcfp_data __section(.data.arcfp)
72#else 72#else
73#define __arcfp_data __attribute__((__section__(".data"))) 73#define __arcfp_data __section(.data)
74#endif 74#endif
75 75
76#endif /* __ASSEMBLY__ */ 76#endif /* __ASSEMBLY__ */
diff --git a/arch/arc/include/asm/mach_desc.h b/arch/arc/include/asm/mach_desc.h
index 8ac0e2ac3e70..73746ed5b834 100644
--- a/arch/arc/include/asm/mach_desc.h
+++ b/arch/arc/include/asm/mach_desc.h
@@ -53,8 +53,7 @@ extern const struct machine_desc __arch_info_begin[], __arch_info_end[];
53 */ 53 */
54#define MACHINE_START(_type, _name) \ 54#define MACHINE_START(_type, _name) \
55static const struct machine_desc __mach_desc_##_type \ 55static const struct machine_desc __mach_desc_##_type \
56__used \ 56__used __section(.arch.info.init) = { \
57__attribute__((__section__(".arch.info.init"))) = { \
58 .name = _name, 57 .name = _name,
59 58
60#define MACHINE_END \ 59#define MACHINE_END \
diff --git a/arch/arc/kernel/mcip.c b/arch/arc/kernel/mcip.c
index 18b493dfb3a8..abf9398cc333 100644
--- a/arch/arc/kernel/mcip.c
+++ b/arch/arc/kernel/mcip.c
@@ -202,8 +202,8 @@ static void idu_set_dest(unsigned int cmn_irq, unsigned int cpu_mask)
202 __mcip_cmd_data(CMD_IDU_SET_DEST, cmn_irq, cpu_mask); 202 __mcip_cmd_data(CMD_IDU_SET_DEST, cmn_irq, cpu_mask);
203} 203}
204 204
205static void idu_set_mode(unsigned int cmn_irq, unsigned int lvl, 205static void idu_set_mode(unsigned int cmn_irq, bool set_lvl, unsigned int lvl,
206 unsigned int distr) 206 bool set_distr, unsigned int distr)
207{ 207{
208 union { 208 union {
209 unsigned int word; 209 unsigned int word;
@@ -212,8 +212,11 @@ static void idu_set_mode(unsigned int cmn_irq, unsigned int lvl,
212 }; 212 };
213 } data; 213 } data;
214 214
215 data.distr = distr; 215 data.word = __mcip_cmd_read(CMD_IDU_READ_MODE, cmn_irq);
216 data.lvl = lvl; 216 if (set_distr)
217 data.distr = distr;
218 if (set_lvl)
219 data.lvl = lvl;
217 __mcip_cmd_data(CMD_IDU_SET_MODE, cmn_irq, data.word); 220 __mcip_cmd_data(CMD_IDU_SET_MODE, cmn_irq, data.word);
218} 221}
219 222
@@ -240,6 +243,25 @@ static void idu_irq_unmask(struct irq_data *data)
240 raw_spin_unlock_irqrestore(&mcip_lock, flags); 243 raw_spin_unlock_irqrestore(&mcip_lock, flags);
241} 244}
242 245
246static void idu_irq_ack(struct irq_data *data)
247{
248 unsigned long flags;
249
250 raw_spin_lock_irqsave(&mcip_lock, flags);
251 __mcip_cmd(CMD_IDU_ACK_CIRQ, data->hwirq);
252 raw_spin_unlock_irqrestore(&mcip_lock, flags);
253}
254
255static void idu_irq_mask_ack(struct irq_data *data)
256{
257 unsigned long flags;
258
259 raw_spin_lock_irqsave(&mcip_lock, flags);
260 __mcip_cmd_data(CMD_IDU_SET_MASK, data->hwirq, 1);
261 __mcip_cmd(CMD_IDU_ACK_CIRQ, data->hwirq);
262 raw_spin_unlock_irqrestore(&mcip_lock, flags);
263}
264
243static int 265static int
244idu_irq_set_affinity(struct irq_data *data, const struct cpumask *cpumask, 266idu_irq_set_affinity(struct irq_data *data, const struct cpumask *cpumask,
245 bool force) 267 bool force)
@@ -263,13 +285,36 @@ idu_irq_set_affinity(struct irq_data *data, const struct cpumask *cpumask,
263 else 285 else
264 distribution_mode = IDU_M_DISTRI_RR; 286 distribution_mode = IDU_M_DISTRI_RR;
265 287
266 idu_set_mode(data->hwirq, IDU_M_TRIG_LEVEL, distribution_mode); 288 idu_set_mode(data->hwirq, false, 0, true, distribution_mode);
267 289
268 raw_spin_unlock_irqrestore(&mcip_lock, flags); 290 raw_spin_unlock_irqrestore(&mcip_lock, flags);
269 291
270 return IRQ_SET_MASK_OK; 292 return IRQ_SET_MASK_OK;
271} 293}
272 294
295static int idu_irq_set_type(struct irq_data *data, u32 type)
296{
297 unsigned long flags;
298
299 /*
300 * ARCv2 IDU HW does not support inverse polarity, so these are the
301 * only interrupt types supported.
302 */
303 if (type & ~(IRQ_TYPE_EDGE_RISING | IRQ_TYPE_LEVEL_HIGH))
304 return -EINVAL;
305
306 raw_spin_lock_irqsave(&mcip_lock, flags);
307
308 idu_set_mode(data->hwirq, true,
309 type & IRQ_TYPE_EDGE_RISING ? IDU_M_TRIG_EDGE :
310 IDU_M_TRIG_LEVEL,
311 false, 0);
312
313 raw_spin_unlock_irqrestore(&mcip_lock, flags);
314
315 return 0;
316}
317
273static void idu_irq_enable(struct irq_data *data) 318static void idu_irq_enable(struct irq_data *data)
274{ 319{
275 /* 320 /*
@@ -289,7 +334,10 @@ static struct irq_chip idu_irq_chip = {
289 .name = "MCIP IDU Intc", 334 .name = "MCIP IDU Intc",
290 .irq_mask = idu_irq_mask, 335 .irq_mask = idu_irq_mask,
291 .irq_unmask = idu_irq_unmask, 336 .irq_unmask = idu_irq_unmask,
337 .irq_ack = idu_irq_ack,
338 .irq_mask_ack = idu_irq_mask_ack,
292 .irq_enable = idu_irq_enable, 339 .irq_enable = idu_irq_enable,
340 .irq_set_type = idu_irq_set_type,
293#ifdef CONFIG_SMP 341#ifdef CONFIG_SMP
294 .irq_set_affinity = idu_irq_set_affinity, 342 .irq_set_affinity = idu_irq_set_affinity,
295#endif 343#endif
@@ -317,7 +365,7 @@ static int idu_irq_map(struct irq_domain *d, unsigned int virq, irq_hw_number_t
317} 365}
318 366
319static const struct irq_domain_ops idu_irq_ops = { 367static const struct irq_domain_ops idu_irq_ops = {
320 .xlate = irq_domain_xlate_onecell, 368 .xlate = irq_domain_xlate_onetwocell,
321 .map = idu_irq_map, 369 .map = idu_irq_map,
322}; 370};
323 371
diff --git a/arch/arc/kernel/unwind.c b/arch/arc/kernel/unwind.c
index c2663fce7f6c..dc05a63516f5 100644
--- a/arch/arc/kernel/unwind.c
+++ b/arch/arc/kernel/unwind.c
@@ -572,6 +572,7 @@ static unsigned long read_pointer(const u8 **pLoc, const void *end,
572#else 572#else
573 BUILD_BUG_ON(sizeof(u32) != sizeof(value)); 573 BUILD_BUG_ON(sizeof(u32) != sizeof(value));
574#endif 574#endif
575 /* Fall through */
575 case DW_EH_PE_native: 576 case DW_EH_PE_native:
576 if (end < (const void *)(ptr.pul + 1)) 577 if (end < (const void *)(ptr.pul + 1))
577 return 0; 578 return 0;
@@ -826,7 +827,7 @@ static int processCFI(const u8 *start, const u8 *end, unsigned long targetLoc,
826 case DW_CFA_def_cfa: 827 case DW_CFA_def_cfa:
827 state->cfa.reg = get_uleb128(&ptr.p8, end); 828 state->cfa.reg = get_uleb128(&ptr.p8, end);
828 unw_debug("cfa_def_cfa: r%lu ", state->cfa.reg); 829 unw_debug("cfa_def_cfa: r%lu ", state->cfa.reg);
829 /*nobreak*/ 830 /* fall through */
830 case DW_CFA_def_cfa_offset: 831 case DW_CFA_def_cfa_offset:
831 state->cfa.offs = get_uleb128(&ptr.p8, end); 832 state->cfa.offs = get_uleb128(&ptr.p8, end);
832 unw_debug("cfa_def_cfa_offset: 0x%lx ", 833 unw_debug("cfa_def_cfa_offset: 0x%lx ",
@@ -834,7 +835,7 @@ static int processCFI(const u8 *start, const u8 *end, unsigned long targetLoc,
834 break; 835 break;
835 case DW_CFA_def_cfa_sf: 836 case DW_CFA_def_cfa_sf:
836 state->cfa.reg = get_uleb128(&ptr.p8, end); 837 state->cfa.reg = get_uleb128(&ptr.p8, end);
837 /*nobreak */ 838 /* fall through */
838 case DW_CFA_def_cfa_offset_sf: 839 case DW_CFA_def_cfa_offset_sf:
839 state->cfa.offs = get_sleb128(&ptr.p8, end) 840 state->cfa.offs = get_sleb128(&ptr.p8, end)
840 * state->dataAlign; 841 * state->dataAlign;
diff --git a/arch/arc/mm/dma.c b/arch/arc/mm/dma.c
index 62c210e7ee4c..70a3fbe79fba 100644
--- a/arch/arc/mm/dma.c
+++ b/arch/arc/mm/dma.c
@@ -101,7 +101,7 @@ void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
101 if (is_isa_arcv2() && ioc_enable && coherent) 101 if (is_isa_arcv2() && ioc_enable && coherent)
102 dev->dma_coherent = true; 102 dev->dma_coherent = true;
103 103
104 dev_info(dev, "use %sncoherent DMA ops\n", 104 dev_info(dev, "use %scoherent DMA ops\n",
105 dev->dma_coherent ? "" : "non"); 105 dev->dma_coherent ? "" : "non");
106} 106}
107 107
diff --git a/arch/arc/plat-hsdk/platform.c b/arch/arc/plat-hsdk/platform.c
index 7dd2dd335cf6..0b961a2a10b8 100644
--- a/arch/arc/plat-hsdk/platform.c
+++ b/arch/arc/plat-hsdk/platform.c
@@ -6,11 +6,15 @@
6 */ 6 */
7 7
8#include <linux/init.h> 8#include <linux/init.h>
9#include <linux/of_fdt.h>
10#include <linux/libfdt.h>
9#include <linux/smp.h> 11#include <linux/smp.h>
10#include <asm/arcregs.h> 12#include <asm/arcregs.h>
11#include <asm/io.h> 13#include <asm/io.h>
12#include <asm/mach_desc.h> 14#include <asm/mach_desc.h>
13 15
16int arc_hsdk_axi_dmac_coherent __section(.data) = 0;
17
14#define ARC_CCM_UNUSED_ADDR 0x60000000 18#define ARC_CCM_UNUSED_ADDR 0x60000000
15 19
16static void __init hsdk_init_per_cpu(unsigned int cpu) 20static void __init hsdk_init_per_cpu(unsigned int cpu)
@@ -97,6 +101,42 @@ static void __init hsdk_enable_gpio_intc_wire(void)
97 iowrite32(GPIO_INT_CONNECTED_MASK, (void __iomem *) GPIO_INTEN); 101 iowrite32(GPIO_INT_CONNECTED_MASK, (void __iomem *) GPIO_INTEN);
98} 102}
99 103
104static int __init hsdk_tweak_node_coherency(const char *path, bool coherent)
105{
106 void *fdt = initial_boot_params;
107 const void *prop;
108 int node, ret;
109 bool dt_coh_set;
110
111 node = fdt_path_offset(fdt, path);
112 if (node < 0)
113 goto tweak_fail;
114
115 prop = fdt_getprop(fdt, node, "dma-coherent", &ret);
116 if (!prop && ret != -FDT_ERR_NOTFOUND)
117 goto tweak_fail;
118
119 dt_coh_set = ret != -FDT_ERR_NOTFOUND;
120 ret = 0;
121
122 /* need to remove "dma-coherent" property */
123 if (dt_coh_set && !coherent)
124 ret = fdt_delprop(fdt, node, "dma-coherent");
125
126 /* need to set "dma-coherent" property */
127 if (!dt_coh_set && coherent)
128 ret = fdt_setprop(fdt, node, "dma-coherent", NULL, 0);
129
130 if (ret < 0)
131 goto tweak_fail;
132
133 return 0;
134
135tweak_fail:
136 pr_err("failed to tweak %s to %scoherent\n", path, coherent ? "" : "non");
137 return -EFAULT;
138}
139
100enum hsdk_axi_masters { 140enum hsdk_axi_masters {
101 M_HS_CORE = 0, 141 M_HS_CORE = 0,
102 M_HS_RTT, 142 M_HS_RTT,
@@ -162,6 +202,39 @@ enum hsdk_axi_masters {
162#define CREG_PAE ((void __iomem *)(CREG_BASE + 0x180)) 202#define CREG_PAE ((void __iomem *)(CREG_BASE + 0x180))
163#define CREG_PAE_UPDT ((void __iomem *)(CREG_BASE + 0x194)) 203#define CREG_PAE_UPDT ((void __iomem *)(CREG_BASE + 0x194))
164 204
205static void __init hsdk_init_memory_bridge_axi_dmac(void)
206{
207 bool coherent = !!arc_hsdk_axi_dmac_coherent;
208 u32 axi_m_slv1, axi_m_oft1;
209
210 /*
211 * Don't tweak memory bridge configuration if we failed to tweak DTB
212 * as we will end up in a inconsistent state.
213 */
214 if (hsdk_tweak_node_coherency("/soc/dmac@80000", coherent))
215 return;
216
217 if (coherent) {
218 axi_m_slv1 = 0x77999999;
219 axi_m_oft1 = 0x76DCBA98;
220 } else {
221 axi_m_slv1 = 0x77777777;
222 axi_m_oft1 = 0x76543210;
223 }
224
225 writel(0x77777777, CREG_AXI_M_SLV0(M_DMAC_0));
226 writel(0xFEDCBA98, CREG_AXI_M_OFT0(M_DMAC_0));
227 writel(axi_m_slv1, CREG_AXI_M_SLV1(M_DMAC_0));
228 writel(axi_m_oft1, CREG_AXI_M_OFT1(M_DMAC_0));
229 writel(UPDATE_VAL, CREG_AXI_M_UPDT(M_DMAC_0));
230
231 writel(0x77777777, CREG_AXI_M_SLV0(M_DMAC_1));
232 writel(0xFEDCBA98, CREG_AXI_M_OFT0(M_DMAC_1));
233 writel(axi_m_slv1, CREG_AXI_M_SLV1(M_DMAC_1));
234 writel(axi_m_oft1, CREG_AXI_M_OFT1(M_DMAC_1));
235 writel(UPDATE_VAL, CREG_AXI_M_UPDT(M_DMAC_1));
236}
237
165static void __init hsdk_init_memory_bridge(void) 238static void __init hsdk_init_memory_bridge(void)
166{ 239{
167 u32 reg; 240 u32 reg;
@@ -227,24 +300,14 @@ static void __init hsdk_init_memory_bridge(void)
227 writel(0x76543210, CREG_AXI_M_OFT1(M_GPU)); 300 writel(0x76543210, CREG_AXI_M_OFT1(M_GPU));
228 writel(UPDATE_VAL, CREG_AXI_M_UPDT(M_GPU)); 301 writel(UPDATE_VAL, CREG_AXI_M_UPDT(M_GPU));
229 302
230 writel(0x77777777, CREG_AXI_M_SLV0(M_DMAC_0));
231 writel(0x77777777, CREG_AXI_M_SLV1(M_DMAC_0));
232 writel(0xFEDCBA98, CREG_AXI_M_OFT0(M_DMAC_0));
233 writel(0x76543210, CREG_AXI_M_OFT1(M_DMAC_0));
234 writel(UPDATE_VAL, CREG_AXI_M_UPDT(M_DMAC_0));
235
236 writel(0x77777777, CREG_AXI_M_SLV0(M_DMAC_1));
237 writel(0x77777777, CREG_AXI_M_SLV1(M_DMAC_1));
238 writel(0xFEDCBA98, CREG_AXI_M_OFT0(M_DMAC_1));
239 writel(0x76543210, CREG_AXI_M_OFT1(M_DMAC_1));
240 writel(UPDATE_VAL, CREG_AXI_M_UPDT(M_DMAC_1));
241
242 writel(0x00000000, CREG_AXI_M_SLV0(M_DVFS)); 303 writel(0x00000000, CREG_AXI_M_SLV0(M_DVFS));
243 writel(0x60000000, CREG_AXI_M_SLV1(M_DVFS)); 304 writel(0x60000000, CREG_AXI_M_SLV1(M_DVFS));
244 writel(0x00000000, CREG_AXI_M_OFT0(M_DVFS)); 305 writel(0x00000000, CREG_AXI_M_OFT0(M_DVFS));
245 writel(0x00000000, CREG_AXI_M_OFT1(M_DVFS)); 306 writel(0x00000000, CREG_AXI_M_OFT1(M_DVFS));
246 writel(UPDATE_VAL, CREG_AXI_M_UPDT(M_DVFS)); 307 writel(UPDATE_VAL, CREG_AXI_M_UPDT(M_DVFS));
247 308
309 hsdk_init_memory_bridge_axi_dmac();
310
248 /* 311 /*
249 * PAE remapping for DMA clients does not work due to an RTL bug, so 312 * PAE remapping for DMA clients does not work due to an RTL bug, so
250 * CREG_PAE register must be programmed to all zeroes, otherwise it 313 * CREG_PAE register must be programmed to all zeroes, otherwise it
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 33b00579beff..24360211534a 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -7,6 +7,8 @@ config ARM
7 select ARCH_HAS_BINFMT_FLAT 7 select ARCH_HAS_BINFMT_FLAT
8 select ARCH_HAS_DEBUG_VIRTUAL if MMU 8 select ARCH_HAS_DEBUG_VIRTUAL if MMU
9 select ARCH_HAS_DEVMEM_IS_ALLOWED 9 select ARCH_HAS_DEVMEM_IS_ALLOWED
10 select ARCH_HAS_DMA_COHERENT_TO_PFN if SWIOTLB
11 select ARCH_HAS_DMA_MMAP_PGPROT if SWIOTLB
10 select ARCH_HAS_ELF_RANDOMIZE 12 select ARCH_HAS_ELF_RANDOMIZE
11 select ARCH_HAS_FORTIFY_SOURCE 13 select ARCH_HAS_FORTIFY_SOURCE
12 select ARCH_HAS_KEEPINITRD 14 select ARCH_HAS_KEEPINITRD
@@ -18,6 +20,8 @@ config ARM
18 select ARCH_HAS_SET_MEMORY 20 select ARCH_HAS_SET_MEMORY
19 select ARCH_HAS_STRICT_KERNEL_RWX if MMU && !XIP_KERNEL 21 select ARCH_HAS_STRICT_KERNEL_RWX if MMU && !XIP_KERNEL
20 select ARCH_HAS_STRICT_MODULE_RWX if MMU 22 select ARCH_HAS_STRICT_MODULE_RWX if MMU
23 select ARCH_HAS_SYNC_DMA_FOR_DEVICE if SWIOTLB
24 select ARCH_HAS_SYNC_DMA_FOR_CPU if SWIOTLB
21 select ARCH_HAS_TEARDOWN_DMA_OPS if MMU 25 select ARCH_HAS_TEARDOWN_DMA_OPS if MMU
22 select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST 26 select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST
23 select ARCH_HAVE_CUSTOM_GPIO_H 27 select ARCH_HAVE_CUSTOM_GPIO_H
diff --git a/arch/arm/Kconfig.debug b/arch/arm/Kconfig.debug
index c929bea9a9ff..85710e078afb 100644
--- a/arch/arm/Kconfig.debug
+++ b/arch/arm/Kconfig.debug
@@ -1535,7 +1535,6 @@ config DEBUG_LL_INCLUDE
1535 DEBUG_IMX7D_UART 1535 DEBUG_IMX7D_UART
1536 default "debug/ks8695.S" if DEBUG_KS8695_UART 1536 default "debug/ks8695.S" if DEBUG_KS8695_UART
1537 default "debug/msm.S" if DEBUG_QCOM_UARTDM 1537 default "debug/msm.S" if DEBUG_QCOM_UARTDM
1538 default "debug/netx.S" if DEBUG_NETX_UART
1539 default "debug/omap2plus.S" if DEBUG_OMAP2PLUS_UART 1538 default "debug/omap2plus.S" if DEBUG_OMAP2PLUS_UART
1540 default "debug/renesas-scif.S" if DEBUG_R7S72100_SCIF2 1539 default "debug/renesas-scif.S" if DEBUG_R7S72100_SCIF2
1541 default "debug/renesas-scif.S" if DEBUG_RCAR_GEN1_SCIF0 1540 default "debug/renesas-scif.S" if DEBUG_RCAR_GEN1_SCIF0
@@ -1575,7 +1574,6 @@ config DEBUG_UART_8250
1575 1574
1576config DEBUG_UART_PHYS 1575config DEBUG_UART_PHYS
1577 hex "Physical base address of debug UART" 1576 hex "Physical base address of debug UART"
1578 default 0x00100a00 if DEBUG_NETX_UART
1579 default 0x01c20000 if DEBUG_DAVINCI_DMx_UART0 1577 default 0x01c20000 if DEBUG_DAVINCI_DMx_UART0
1580 default 0x01c28000 if DEBUG_SUNXI_UART0 1578 default 0x01c28000 if DEBUG_SUNXI_UART0
1581 default 0x01c28400 if DEBUG_SUNXI_UART1 1579 default 0x01c28400 if DEBUG_SUNXI_UART1
@@ -1700,7 +1698,6 @@ config DEBUG_UART_PHYS
1700 DEBUG_LL_UART_8250 || DEBUG_LL_UART_PL01X || \ 1698 DEBUG_LL_UART_8250 || DEBUG_LL_UART_PL01X || \
1701 DEBUG_LL_UART_EFM32 || \ 1699 DEBUG_LL_UART_EFM32 || \
1702 DEBUG_UART_8250 || DEBUG_UART_PL01X || DEBUG_MESON_UARTAO || \ 1700 DEBUG_UART_8250 || DEBUG_UART_PL01X || DEBUG_MESON_UARTAO || \
1703 DEBUG_NETX_UART || \
1704 DEBUG_QCOM_UARTDM || DEBUG_R7S72100_SCIF2 || \ 1701 DEBUG_QCOM_UARTDM || DEBUG_R7S72100_SCIF2 || \
1705 DEBUG_RCAR_GEN1_SCIF0 || DEBUG_RCAR_GEN1_SCIF2 || \ 1702 DEBUG_RCAR_GEN1_SCIF0 || DEBUG_RCAR_GEN1_SCIF2 || \
1706 DEBUG_RCAR_GEN2_SCIF0 || DEBUG_RCAR_GEN2_SCIF1 || \ 1703 DEBUG_RCAR_GEN2_SCIF0 || DEBUG_RCAR_GEN2_SCIF1 || \
@@ -1717,7 +1714,6 @@ config DEBUG_UART_VIRT
1717 default 0xc881f000 if DEBUG_RV1108_UART2 1714 default 0xc881f000 if DEBUG_RV1108_UART2
1718 default 0xc8821000 if DEBUG_RV1108_UART1 1715 default 0xc8821000 if DEBUG_RV1108_UART1
1719 default 0xc8912000 if DEBUG_RV1108_UART0 1716 default 0xc8912000 if DEBUG_RV1108_UART0
1720 default 0xe0000a00 if DEBUG_NETX_UART
1721 default 0xe0010fe0 if ARCH_RPC 1717 default 0xe0010fe0 if ARCH_RPC
1722 default 0xf0000be0 if ARCH_EBSA110 1718 default 0xf0000be0 if ARCH_EBSA110
1723 default 0xf0010000 if DEBUG_ASM9260_UART 1719 default 0xf0010000 if DEBUG_ASM9260_UART
@@ -1822,7 +1818,6 @@ config DEBUG_UART_VIRT
1822 default DEBUG_UART_PHYS if !MMU 1818 default DEBUG_UART_PHYS if !MMU
1823 depends on DEBUG_LL_UART_8250 || DEBUG_LL_UART_PL01X || \ 1819 depends on DEBUG_LL_UART_8250 || DEBUG_LL_UART_PL01X || \
1824 DEBUG_UART_8250 || DEBUG_UART_PL01X || DEBUG_MESON_UARTAO || \ 1820 DEBUG_UART_8250 || DEBUG_UART_PL01X || DEBUG_MESON_UARTAO || \
1825 DEBUG_NETX_UART || \
1826 DEBUG_QCOM_UARTDM || DEBUG_S3C24XX_UART || \ 1821 DEBUG_QCOM_UARTDM || DEBUG_S3C24XX_UART || \
1827 DEBUG_S3C64XX_UART || \ 1822 DEBUG_S3C64XX_UART || \
1828 DEBUG_BCM63XX_UART || DEBUG_ASM9260_UART || \ 1823 DEBUG_BCM63XX_UART || DEBUG_ASM9260_UART || \
diff --git a/arch/arm/boot/dts/am33xx-l4.dtsi b/arch/arm/boot/dts/am33xx-l4.dtsi
index ced1a19d5f89..46849d6ecb3e 100644
--- a/arch/arm/boot/dts/am33xx-l4.dtsi
+++ b/arch/arm/boot/dts/am33xx-l4.dtsi
@@ -185,7 +185,7 @@
185 uart0: serial@0 { 185 uart0: serial@0 {
186 compatible = "ti,am3352-uart", "ti,omap3-uart"; 186 compatible = "ti,am3352-uart", "ti,omap3-uart";
187 clock-frequency = <48000000>; 187 clock-frequency = <48000000>;
188 reg = <0x0 0x2000>; 188 reg = <0x0 0x1000>;
189 interrupts = <72>; 189 interrupts = <72>;
190 status = "disabled"; 190 status = "disabled";
191 dmas = <&edma 26 0>, <&edma 27 0>; 191 dmas = <&edma 26 0>, <&edma 27 0>;
@@ -934,7 +934,7 @@
934 uart1: serial@0 { 934 uart1: serial@0 {
935 compatible = "ti,am3352-uart", "ti,omap3-uart"; 935 compatible = "ti,am3352-uart", "ti,omap3-uart";
936 clock-frequency = <48000000>; 936 clock-frequency = <48000000>;
937 reg = <0x0 0x2000>; 937 reg = <0x0 0x1000>;
938 interrupts = <73>; 938 interrupts = <73>;
939 status = "disabled"; 939 status = "disabled";
940 dmas = <&edma 28 0>, <&edma 29 0>; 940 dmas = <&edma 28 0>, <&edma 29 0>;
@@ -966,7 +966,7 @@
966 uart2: serial@0 { 966 uart2: serial@0 {
967 compatible = "ti,am3352-uart", "ti,omap3-uart"; 967 compatible = "ti,am3352-uart", "ti,omap3-uart";
968 clock-frequency = <48000000>; 968 clock-frequency = <48000000>;
969 reg = <0x0 0x2000>; 969 reg = <0x0 0x1000>;
970 interrupts = <74>; 970 interrupts = <74>;
971 status = "disabled"; 971 status = "disabled";
972 dmas = <&edma 30 0>, <&edma 31 0>; 972 dmas = <&edma 30 0>, <&edma 31 0>;
@@ -1614,7 +1614,7 @@
1614 uart3: serial@0 { 1614 uart3: serial@0 {
1615 compatible = "ti,am3352-uart", "ti,omap3-uart"; 1615 compatible = "ti,am3352-uart", "ti,omap3-uart";
1616 clock-frequency = <48000000>; 1616 clock-frequency = <48000000>;
1617 reg = <0x0 0x2000>; 1617 reg = <0x0 0x1000>;
1618 interrupts = <44>; 1618 interrupts = <44>;
1619 status = "disabled"; 1619 status = "disabled";
1620 }; 1620 };
@@ -1644,7 +1644,7 @@
1644 uart4: serial@0 { 1644 uart4: serial@0 {
1645 compatible = "ti,am3352-uart", "ti,omap3-uart"; 1645 compatible = "ti,am3352-uart", "ti,omap3-uart";
1646 clock-frequency = <48000000>; 1646 clock-frequency = <48000000>;
1647 reg = <0x0 0x2000>; 1647 reg = <0x0 0x1000>;
1648 interrupts = <45>; 1648 interrupts = <45>;
1649 status = "disabled"; 1649 status = "disabled";
1650 }; 1650 };
@@ -1674,7 +1674,7 @@
1674 uart5: serial@0 { 1674 uart5: serial@0 {
1675 compatible = "ti,am3352-uart", "ti,omap3-uart"; 1675 compatible = "ti,am3352-uart", "ti,omap3-uart";
1676 clock-frequency = <48000000>; 1676 clock-frequency = <48000000>;
1677 reg = <0x0 0x2000>; 1677 reg = <0x0 0x1000>;
1678 interrupts = <46>; 1678 interrupts = <46>;
1679 status = "disabled"; 1679 status = "disabled";
1680 }; 1680 };
@@ -1758,6 +1758,8 @@
1758 1758
1759 target-module@cc000 { /* 0x481cc000, ap 60 46.0 */ 1759 target-module@cc000 { /* 0x481cc000, ap 60 46.0 */
1760 compatible = "ti,sysc-omap4", "ti,sysc"; 1760 compatible = "ti,sysc-omap4", "ti,sysc";
1761 reg = <0xcc020 0x4>;
1762 reg-names = "rev";
1761 ti,hwmods = "d_can0"; 1763 ti,hwmods = "d_can0";
1762 /* Domains (P, C): per_pwrdm, l4ls_clkdm */ 1764 /* Domains (P, C): per_pwrdm, l4ls_clkdm */
1763 clocks = <&l4ls_clkctrl AM3_L4LS_D_CAN0_CLKCTRL 0>, 1765 clocks = <&l4ls_clkctrl AM3_L4LS_D_CAN0_CLKCTRL 0>,
@@ -1780,6 +1782,8 @@
1780 1782
1781 target-module@d0000 { /* 0x481d0000, ap 62 42.0 */ 1783 target-module@d0000 { /* 0x481d0000, ap 62 42.0 */
1782 compatible = "ti,sysc-omap4", "ti,sysc"; 1784 compatible = "ti,sysc-omap4", "ti,sysc";
1785 reg = <0xd0020 0x4>;
1786 reg-names = "rev";
1783 ti,hwmods = "d_can1"; 1787 ti,hwmods = "d_can1";
1784 /* Domains (P, C): per_pwrdm, l4ls_clkdm */ 1788 /* Domains (P, C): per_pwrdm, l4ls_clkdm */
1785 clocks = <&l4ls_clkctrl AM3_L4LS_D_CAN1_CLKCTRL 0>, 1789 clocks = <&l4ls_clkctrl AM3_L4LS_D_CAN1_CLKCTRL 0>,
diff --git a/arch/arm/boot/dts/am33xx.dtsi b/arch/arm/boot/dts/am33xx.dtsi
index e5c2f71a7c77..fb6b8aa12cc5 100644
--- a/arch/arm/boot/dts/am33xx.dtsi
+++ b/arch/arm/boot/dts/am33xx.dtsi
@@ -234,13 +234,33 @@
234 interrupt-names = "edma3_tcerrint"; 234 interrupt-names = "edma3_tcerrint";
235 }; 235 };
236 236
237 mmc3: mmc@47810000 { 237 target-module@47810000 {
238 compatible = "ti,omap4-hsmmc"; 238 compatible = "ti,sysc-omap2", "ti,sysc";
239 ti,hwmods = "mmc3"; 239 ti,hwmods = "mmc3";
240 ti,needs-special-reset; 240 reg = <0x478102fc 0x4>,
241 interrupts = <29>; 241 <0x47810110 0x4>,
242 reg = <0x47810000 0x1000>; 242 <0x47810114 0x4>;
243 status = "disabled"; 243 reg-names = "rev", "sysc", "syss";
244 ti,sysc-mask = <(SYSC_OMAP2_CLOCKACTIVITY |
245 SYSC_OMAP2_ENAWAKEUP |
246 SYSC_OMAP2_SOFTRESET |
247 SYSC_OMAP2_AUTOIDLE)>;
248 ti,sysc-sidle = <SYSC_IDLE_FORCE>,
249 <SYSC_IDLE_NO>,
250 <SYSC_IDLE_SMART>;
251 ti,syss-mask = <1>;
252 clocks = <&l3s_clkctrl AM3_L3S_MMC3_CLKCTRL 0>;
253 clock-names = "fck";
254 #address-cells = <1>;
255 #size-cells = <1>;
256 ranges = <0x0 0x47810000 0x1000>;
257
258 mmc3: mmc@0 {
259 compatible = "ti,omap4-hsmmc";
260 ti,needs-special-reset;
261 interrupts = <29>;
262 reg = <0x0 0x1000>;
263 };
244 }; 264 };
245 265
246 usb: usb@47400000 { 266 usb: usb@47400000 {
diff --git a/arch/arm/boot/dts/am4372.dtsi b/arch/arm/boot/dts/am4372.dtsi
index 55aff4db9c7c..848e2a8884e2 100644
--- a/arch/arm/boot/dts/am4372.dtsi
+++ b/arch/arm/boot/dts/am4372.dtsi
@@ -228,13 +228,33 @@
228 interrupt-names = "edma3_tcerrint"; 228 interrupt-names = "edma3_tcerrint";
229 }; 229 };
230 230
231 mmc3: mmc@47810000 { 231 target-module@47810000 {
232 compatible = "ti,omap4-hsmmc"; 232 compatible = "ti,sysc-omap2", "ti,sysc";
233 reg = <0x47810000 0x1000>;
234 ti,hwmods = "mmc3"; 233 ti,hwmods = "mmc3";
235 ti,needs-special-reset; 234 reg = <0x478102fc 0x4>,
236 interrupts = <GIC_SPI 29 IRQ_TYPE_LEVEL_HIGH>; 235 <0x47810110 0x4>,
237 status = "disabled"; 236 <0x47810114 0x4>;
237 reg-names = "rev", "sysc", "syss";
238 ti,sysc-mask = <(SYSC_OMAP2_CLOCKACTIVITY |
239 SYSC_OMAP2_ENAWAKEUP |
240 SYSC_OMAP2_SOFTRESET |
241 SYSC_OMAP2_AUTOIDLE)>;
242 ti,sysc-sidle = <SYSC_IDLE_FORCE>,
243 <SYSC_IDLE_NO>,
244 <SYSC_IDLE_SMART>;
245 ti,syss-mask = <1>;
246 clocks = <&l3s_clkctrl AM4_L3S_MMC3_CLKCTRL 0>;
247 clock-names = "fck";
248 #address-cells = <1>;
249 #size-cells = <1>;
250 ranges = <0x0 0x47810000 0x1000>;
251
252 mmc3: mmc@0 {
253 compatible = "ti,omap4-hsmmc";
254 ti,needs-special-reset;
255 interrupts = <GIC_SPI 29 IRQ_TYPE_LEVEL_HIGH>;
256 reg = <0x0 0x1000>;
257 };
238 }; 258 };
239 259
240 sham: sham@53100000 { 260 sham: sham@53100000 {
diff --git a/arch/arm/boot/dts/am437x-l4.dtsi b/arch/arm/boot/dts/am437x-l4.dtsi
index 989cb60b9029..04bee4ff9dcb 100644
--- a/arch/arm/boot/dts/am437x-l4.dtsi
+++ b/arch/arm/boot/dts/am437x-l4.dtsi
@@ -1574,6 +1574,8 @@
1574 1574
1575 target-module@cc000 { /* 0x481cc000, ap 50 46.0 */ 1575 target-module@cc000 { /* 0x481cc000, ap 50 46.0 */
1576 compatible = "ti,sysc-omap4", "ti,sysc"; 1576 compatible = "ti,sysc-omap4", "ti,sysc";
1577 reg = <0xcc020 0x4>;
1578 reg-names = "rev";
1577 ti,hwmods = "d_can0"; 1579 ti,hwmods = "d_can0";
1578 /* Domains (P, C): per_pwrdm, l4ls_clkdm */ 1580 /* Domains (P, C): per_pwrdm, l4ls_clkdm */
1579 clocks = <&l4ls_clkctrl AM4_L4LS_D_CAN0_CLKCTRL 0>; 1581 clocks = <&l4ls_clkctrl AM4_L4LS_D_CAN0_CLKCTRL 0>;
@@ -1593,6 +1595,8 @@
1593 1595
1594 target-module@d0000 { /* 0x481d0000, ap 52 3a.0 */ 1596 target-module@d0000 { /* 0x481d0000, ap 52 3a.0 */
1595 compatible = "ti,sysc-omap4", "ti,sysc"; 1597 compatible = "ti,sysc-omap4", "ti,sysc";
1598 reg = <0xd0020 0x4>;
1599 reg-names = "rev";
1596 ti,hwmods = "d_can1"; 1600 ti,hwmods = "d_can1";
1597 /* Domains (P, C): per_pwrdm, l4ls_clkdm */ 1601 /* Domains (P, C): per_pwrdm, l4ls_clkdm */
1598 clocks = <&l4ls_clkctrl AM4_L4LS_D_CAN1_CLKCTRL 0>; 1602 clocks = <&l4ls_clkctrl AM4_L4LS_D_CAN1_CLKCTRL 0>;
diff --git a/arch/arm/boot/dts/am571x-idk.dts b/arch/arm/boot/dts/am571x-idk.dts
index 1d5e99964bbf..0aaacea1d887 100644
--- a/arch/arm/boot/dts/am571x-idk.dts
+++ b/arch/arm/boot/dts/am571x-idk.dts
@@ -175,14 +175,9 @@
175}; 175};
176 176
177&mmc1 { 177&mmc1 {
178 pinctrl-names = "default", "hs", "sdr12", "sdr25", "sdr50", "ddr50", "sdr104"; 178 pinctrl-names = "default", "hs";
179 pinctrl-0 = <&mmc1_pins_default_no_clk_pu>; 179 pinctrl-0 = <&mmc1_pins_default_no_clk_pu>;
180 pinctrl-1 = <&mmc1_pins_hs>; 180 pinctrl-1 = <&mmc1_pins_hs>;
181 pinctrl-2 = <&mmc1_pins_sdr12>;
182 pinctrl-3 = <&mmc1_pins_sdr25>;
183 pinctrl-4 = <&mmc1_pins_sdr50>;
184 pinctrl-5 = <&mmc1_pins_ddr50_rev20 &mmc1_iodelay_ddr50_conf>;
185 pinctrl-6 = <&mmc1_pins_sdr104 &mmc1_iodelay_sdr104_rev20_conf>;
186}; 181};
187 182
188&mmc2 { 183&mmc2 {
diff --git a/arch/arm/boot/dts/am572x-idk.dts b/arch/arm/boot/dts/am572x-idk.dts
index c65d7f6d3b5a..ea1c119feaa5 100644
--- a/arch/arm/boot/dts/am572x-idk.dts
+++ b/arch/arm/boot/dts/am572x-idk.dts
@@ -16,14 +16,9 @@
16}; 16};
17 17
18&mmc1 { 18&mmc1 {
19 pinctrl-names = "default", "hs", "sdr12", "sdr25", "sdr50", "ddr50", "sdr104"; 19 pinctrl-names = "default", "hs";
20 pinctrl-0 = <&mmc1_pins_default_no_clk_pu>; 20 pinctrl-0 = <&mmc1_pins_default_no_clk_pu>;
21 pinctrl-1 = <&mmc1_pins_hs>; 21 pinctrl-1 = <&mmc1_pins_hs>;
22 pinctrl-2 = <&mmc1_pins_sdr12>;
23 pinctrl-3 = <&mmc1_pins_sdr25>;
24 pinctrl-4 = <&mmc1_pins_sdr50>;
25 pinctrl-5 = <&mmc1_pins_ddr50 &mmc1_iodelay_ddr_rev20_conf>;
26 pinctrl-6 = <&mmc1_pins_sdr104 &mmc1_iodelay_sdr104_rev20_conf>;
27}; 22};
28 23
29&mmc2 { 24&mmc2 {
diff --git a/arch/arm/boot/dts/am574x-idk.dts b/arch/arm/boot/dts/am574x-idk.dts
index dc5141c35610..7935d70874ce 100644
--- a/arch/arm/boot/dts/am574x-idk.dts
+++ b/arch/arm/boot/dts/am574x-idk.dts
@@ -24,14 +24,9 @@
24}; 24};
25 25
26&mmc1 { 26&mmc1 {
27 pinctrl-names = "default", "hs", "sdr12", "sdr25", "sdr50", "ddr50", "sdr104"; 27 pinctrl-names = "default", "hs";
28 pinctrl-0 = <&mmc1_pins_default_no_clk_pu>; 28 pinctrl-0 = <&mmc1_pins_default_no_clk_pu>;
29 pinctrl-1 = <&mmc1_pins_hs>; 29 pinctrl-1 = <&mmc1_pins_hs>;
30 pinctrl-2 = <&mmc1_pins_default>;
31 pinctrl-3 = <&mmc1_pins_hs>;
32 pinctrl-4 = <&mmc1_pins_sdr50>;
33 pinctrl-5 = <&mmc1_pins_ddr50 &mmc1_iodelay_ddr_conf>;
34 pinctrl-6 = <&mmc1_pins_ddr50 &mmc1_iodelay_sdr104_conf>;
35}; 30};
36 31
37&mmc2 { 32&mmc2 {
diff --git a/arch/arm/boot/dts/am57xx-beagle-x15-common.dtsi b/arch/arm/boot/dts/am57xx-beagle-x15-common.dtsi
index d02f5fa61e5f..bc76f1705c0f 100644
--- a/arch/arm/boot/dts/am57xx-beagle-x15-common.dtsi
+++ b/arch/arm/boot/dts/am57xx-beagle-x15-common.dtsi
@@ -379,7 +379,7 @@
379 }; 379 };
380}; 380};
381 381
382&gpio7 { 382&gpio7_target {
383 ti,no-reset-on-init; 383 ti,no-reset-on-init;
384 ti,no-idle-on-init; 384 ti,no-idle-on-init;
385}; 385};
@@ -430,6 +430,7 @@
430 430
431 bus-width = <4>; 431 bus-width = <4>;
432 cd-gpios = <&gpio6 27 GPIO_ACTIVE_LOW>; /* gpio 219 */ 432 cd-gpios = <&gpio6 27 GPIO_ACTIVE_LOW>; /* gpio 219 */
433 no-1-8-v;
433}; 434};
434 435
435&mmc2 { 436&mmc2 {
diff --git a/arch/arm/boot/dts/am57xx-beagle-x15-revb1.dts b/arch/arm/boot/dts/am57xx-beagle-x15-revb1.dts
index a374b5cd6db0..7b113b52c3fb 100644
--- a/arch/arm/boot/dts/am57xx-beagle-x15-revb1.dts
+++ b/arch/arm/boot/dts/am57xx-beagle-x15-revb1.dts
@@ -16,14 +16,9 @@
16}; 16};
17 17
18&mmc1 { 18&mmc1 {
19 pinctrl-names = "default", "hs", "sdr12", "sdr25", "sdr50", "ddr50", "sdr104"; 19 pinctrl-names = "default", "hs";
20 pinctrl-0 = <&mmc1_pins_default>; 20 pinctrl-0 = <&mmc1_pins_default>;
21 pinctrl-1 = <&mmc1_pins_hs>; 21 pinctrl-1 = <&mmc1_pins_hs>;
22 pinctrl-2 = <&mmc1_pins_sdr12>;
23 pinctrl-3 = <&mmc1_pins_sdr25>;
24 pinctrl-4 = <&mmc1_pins_sdr50>;
25 pinctrl-5 = <&mmc1_pins_ddr50 &mmc1_iodelay_ddr_rev11_conf>;
26 pinctrl-6 = <&mmc1_pins_sdr104 &mmc1_iodelay_sdr104_rev11_conf>;
27 vmmc-supply = <&vdd_3v3>; 22 vmmc-supply = <&vdd_3v3>;
28 vqmmc-supply = <&ldo1_reg>; 23 vqmmc-supply = <&ldo1_reg>;
29}; 24};
diff --git a/arch/arm/boot/dts/am57xx-beagle-x15-revc.dts b/arch/arm/boot/dts/am57xx-beagle-x15-revc.dts
index 4badd2144db9..30c500b15b21 100644
--- a/arch/arm/boot/dts/am57xx-beagle-x15-revc.dts
+++ b/arch/arm/boot/dts/am57xx-beagle-x15-revc.dts
@@ -16,14 +16,9 @@
16}; 16};
17 17
18&mmc1 { 18&mmc1 {
19 pinctrl-names = "default", "hs", "sdr12", "sdr25", "sdr50", "ddr50", "sdr104"; 19 pinctrl-names = "default", "hs";
20 pinctrl-0 = <&mmc1_pins_default>; 20 pinctrl-0 = <&mmc1_pins_default>;
21 pinctrl-1 = <&mmc1_pins_hs>; 21 pinctrl-1 = <&mmc1_pins_hs>;
22 pinctrl-2 = <&mmc1_pins_sdr12>;
23 pinctrl-3 = <&mmc1_pins_sdr25>;
24 pinctrl-4 = <&mmc1_pins_sdr50>;
25 pinctrl-5 = <&mmc1_pins_ddr50 &mmc1_iodelay_ddr_rev20_conf>;
26 pinctrl-6 = <&mmc1_pins_sdr104 &mmc1_iodelay_sdr104_rev20_conf>;
27 vmmc-supply = <&vdd_3v3>; 22 vmmc-supply = <&vdd_3v3>;
28 vqmmc-supply = <&ldo1_reg>; 23 vqmmc-supply = <&ldo1_reg>;
29}; 24};
diff --git a/arch/arm/boot/dts/bcm47094-linksys-panamera.dts b/arch/arm/boot/dts/bcm47094-linksys-panamera.dts
index 18d0ae46e76c..0faae8950375 100644
--- a/arch/arm/boot/dts/bcm47094-linksys-panamera.dts
+++ b/arch/arm/boot/dts/bcm47094-linksys-panamera.dts
@@ -124,6 +124,9 @@
124 }; 124 };
125 125
126 mdio-bus-mux { 126 mdio-bus-mux {
127 #address-cells = <1>;
128 #size-cells = <0>;
129
127 /* BIT(9) = 1 => external mdio */ 130 /* BIT(9) = 1 => external mdio */
128 mdio_ext: mdio@200 { 131 mdio_ext: mdio@200 {
129 reg = <0x200>; 132 reg = <0x200>;
diff --git a/arch/arm/boot/dts/dra7-evm.dts b/arch/arm/boot/dts/dra7-evm.dts
index 714e971b912a..de7f85efaa51 100644
--- a/arch/arm/boot/dts/dra7-evm.dts
+++ b/arch/arm/boot/dts/dra7-evm.dts
@@ -498,7 +498,7 @@
498 phy-supply = <&ldousb_reg>; 498 phy-supply = <&ldousb_reg>;
499}; 499};
500 500
501&gpio7 { 501&gpio7_target {
502 ti,no-reset-on-init; 502 ti,no-reset-on-init;
503 ti,no-idle-on-init; 503 ti,no-idle-on-init;
504}; 504};
diff --git a/arch/arm/boot/dts/dra7-l4.dtsi b/arch/arm/boot/dts/dra7-l4.dtsi
index 23faedec08ab..21e5914fdd62 100644
--- a/arch/arm/boot/dts/dra7-l4.dtsi
+++ b/arch/arm/boot/dts/dra7-l4.dtsi
@@ -1261,7 +1261,7 @@
1261 }; 1261 };
1262 }; 1262 };
1263 1263
1264 target-module@51000 { /* 0x48051000, ap 45 2e.0 */ 1264 gpio7_target: target-module@51000 { /* 0x48051000, ap 45 2e.0 */
1265 compatible = "ti,sysc-omap2", "ti,sysc"; 1265 compatible = "ti,sysc-omap2", "ti,sysc";
1266 ti,hwmods = "gpio7"; 1266 ti,hwmods = "gpio7";
1267 reg = <0x51000 0x4>, 1267 reg = <0x51000 0x4>,
@@ -3025,7 +3025,7 @@
3025 3025
3026 target-module@80000 { /* 0x48480000, ap 31 16.0 */ 3026 target-module@80000 { /* 0x48480000, ap 31 16.0 */
3027 compatible = "ti,sysc-omap4", "ti,sysc"; 3027 compatible = "ti,sysc-omap4", "ti,sysc";
3028 reg = <0x80000 0x4>; 3028 reg = <0x80020 0x4>;
3029 reg-names = "rev"; 3029 reg-names = "rev";
3030 clocks = <&l4per2_clkctrl DRA7_L4PER2_DCAN2_CLKCTRL 0>; 3030 clocks = <&l4per2_clkctrl DRA7_L4PER2_DCAN2_CLKCTRL 0>;
3031 clock-names = "fck"; 3031 clock-names = "fck";
@@ -4577,7 +4577,7 @@
4577 4577
4578 target-module@c000 { /* 0x4ae3c000, ap 30 04.0 */ 4578 target-module@c000 { /* 0x4ae3c000, ap 30 04.0 */
4579 compatible = "ti,sysc-omap4", "ti,sysc"; 4579 compatible = "ti,sysc-omap4", "ti,sysc";
4580 reg = <0xc000 0x4>; 4580 reg = <0xc020 0x4>;
4581 reg-names = "rev"; 4581 reg-names = "rev";
4582 clocks = <&wkupaon_clkctrl DRA7_WKUPAON_DCAN1_CLKCTRL 0>; 4582 clocks = <&wkupaon_clkctrl DRA7_WKUPAON_DCAN1_CLKCTRL 0>;
4583 clock-names = "fck"; 4583 clock-names = "fck";
diff --git a/arch/arm/boot/dts/dra74x-mmc-iodelay.dtsi b/arch/arm/boot/dts/dra74x-mmc-iodelay.dtsi
index 28ebb4eb884a..214b9e6de2c3 100644
--- a/arch/arm/boot/dts/dra74x-mmc-iodelay.dtsi
+++ b/arch/arm/boot/dts/dra74x-mmc-iodelay.dtsi
@@ -32,7 +32,7 @@
32 * 32 *
33 * Datamanual Revisions: 33 * Datamanual Revisions:
34 * 34 *
35 * AM572x Silicon Revision 2.0: SPRS953B, Revised November 2016 35 * AM572x Silicon Revision 2.0: SPRS953F, Revised May 2019
36 * AM572x Silicon Revision 1.1: SPRS915R, Revised November 2016 36 * AM572x Silicon Revision 1.1: SPRS915R, Revised November 2016
37 * 37 *
38 */ 38 */
@@ -229,45 +229,45 @@
229 229
230 mmc3_pins_default: mmc3_pins_default { 230 mmc3_pins_default: mmc3_pins_default {
231 pinctrl-single,pins = < 231 pinctrl-single,pins = <
232 DRA7XX_CORE_IOPAD(0x377c, (PIN_INPUT_PULLUP | MUX_MODE0)) /* mmc3_clk.mmc3_clk */ 232 DRA7XX_CORE_IOPAD(0x377c, (PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0)) /* mmc3_clk.mmc3_clk */
233 DRA7XX_CORE_IOPAD(0x3780, (PIN_INPUT_PULLUP | MUX_MODE0)) /* mmc3_cmd.mmc3_cmd */ 233 DRA7XX_CORE_IOPAD(0x3780, (PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0)) /* mmc3_cmd.mmc3_cmd */
234 DRA7XX_CORE_IOPAD(0x3784, (PIN_INPUT_PULLUP | MUX_MODE0)) /* mmc3_dat0.mmc3_dat0 */ 234 DRA7XX_CORE_IOPAD(0x3784, (PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0)) /* mmc3_dat0.mmc3_dat0 */
235 DRA7XX_CORE_IOPAD(0x3788, (PIN_INPUT_PULLUP | MUX_MODE0)) /* mmc3_dat1.mmc3_dat1 */ 235 DRA7XX_CORE_IOPAD(0x3788, (PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0)) /* mmc3_dat1.mmc3_dat1 */
236 DRA7XX_CORE_IOPAD(0x378c, (PIN_INPUT_PULLUP | MUX_MODE0)) /* mmc3_dat2.mmc3_dat2 */ 236 DRA7XX_CORE_IOPAD(0x378c, (PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0)) /* mmc3_dat2.mmc3_dat2 */
237 DRA7XX_CORE_IOPAD(0x3790, (PIN_INPUT_PULLUP | MUX_MODE0)) /* mmc3_dat3.mmc3_dat3 */ 237 DRA7XX_CORE_IOPAD(0x3790, (PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0)) /* mmc3_dat3.mmc3_dat3 */
238 >; 238 >;
239 }; 239 };
240 240
241 mmc3_pins_hs: mmc3_pins_hs { 241 mmc3_pins_hs: mmc3_pins_hs {
242 pinctrl-single,pins = < 242 pinctrl-single,pins = <
243 DRA7XX_CORE_IOPAD(0x377c, (PIN_INPUT_PULLUP | MUX_MODE0)) /* mmc3_clk.mmc3_clk */ 243 DRA7XX_CORE_IOPAD(0x377c, (PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0)) /* mmc3_clk.mmc3_clk */
244 DRA7XX_CORE_IOPAD(0x3780, (PIN_INPUT_PULLUP | MUX_MODE0)) /* mmc3_cmd.mmc3_cmd */ 244 DRA7XX_CORE_IOPAD(0x3780, (PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0)) /* mmc3_cmd.mmc3_cmd */
245 DRA7XX_CORE_IOPAD(0x3784, (PIN_INPUT_PULLUP | MUX_MODE0)) /* mmc3_dat0.mmc3_dat0 */ 245 DRA7XX_CORE_IOPAD(0x3784, (PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0)) /* mmc3_dat0.mmc3_dat0 */
246 DRA7XX_CORE_IOPAD(0x3788, (PIN_INPUT_PULLUP | MUX_MODE0)) /* mmc3_dat1.mmc3_dat1 */ 246 DRA7XX_CORE_IOPAD(0x3788, (PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0)) /* mmc3_dat1.mmc3_dat1 */
247 DRA7XX_CORE_IOPAD(0x378c, (PIN_INPUT_PULLUP | MUX_MODE0)) /* mmc3_dat2.mmc3_dat2 */ 247 DRA7XX_CORE_IOPAD(0x378c, (PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0)) /* mmc3_dat2.mmc3_dat2 */
248 DRA7XX_CORE_IOPAD(0x3790, (PIN_INPUT_PULLUP | MUX_MODE0)) /* mmc3_dat3.mmc3_dat3 */ 248 DRA7XX_CORE_IOPAD(0x3790, (PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0)) /* mmc3_dat3.mmc3_dat3 */
249 >; 249 >;
250 }; 250 };
251 251
252 mmc3_pins_sdr12: mmc3_pins_sdr12 { 252 mmc3_pins_sdr12: mmc3_pins_sdr12 {
253 pinctrl-single,pins = < 253 pinctrl-single,pins = <
254 DRA7XX_CORE_IOPAD(0x377c, (PIN_INPUT_PULLUP | MUX_MODE0)) /* mmc3_clk.mmc3_clk */ 254 DRA7XX_CORE_IOPAD(0x377c, (PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0)) /* mmc3_clk.mmc3_clk */
255 DRA7XX_CORE_IOPAD(0x3780, (PIN_INPUT_PULLUP | MUX_MODE0)) /* mmc3_cmd.mmc3_cmd */ 255 DRA7XX_CORE_IOPAD(0x3780, (PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0)) /* mmc3_cmd.mmc3_cmd */
256 DRA7XX_CORE_IOPAD(0x3784, (PIN_INPUT_PULLUP | MUX_MODE0)) /* mmc3_dat0.mmc3_dat0 */ 256 DRA7XX_CORE_IOPAD(0x3784, (PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0)) /* mmc3_dat0.mmc3_dat0 */
257 DRA7XX_CORE_IOPAD(0x3788, (PIN_INPUT_PULLUP | MUX_MODE0)) /* mmc3_dat1.mmc3_dat1 */ 257 DRA7XX_CORE_IOPAD(0x3788, (PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0)) /* mmc3_dat1.mmc3_dat1 */
258 DRA7XX_CORE_IOPAD(0x378c, (PIN_INPUT_PULLUP | MUX_MODE0)) /* mmc3_dat2.mmc3_dat2 */ 258 DRA7XX_CORE_IOPAD(0x378c, (PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0)) /* mmc3_dat2.mmc3_dat2 */
259 DRA7XX_CORE_IOPAD(0x3790, (PIN_INPUT_PULLUP | MUX_MODE0)) /* mmc3_dat3.mmc3_dat3 */ 259 DRA7XX_CORE_IOPAD(0x3790, (PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0)) /* mmc3_dat3.mmc3_dat3 */
260 >; 260 >;
261 }; 261 };
262 262
263 mmc3_pins_sdr25: mmc3_pins_sdr25 { 263 mmc3_pins_sdr25: mmc3_pins_sdr25 {
264 pinctrl-single,pins = < 264 pinctrl-single,pins = <
265 DRA7XX_CORE_IOPAD(0x377c, (PIN_INPUT_PULLUP | MUX_MODE0)) /* mmc3_clk.mmc3_clk */ 265 DRA7XX_CORE_IOPAD(0x377c, (PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0)) /* mmc3_clk.mmc3_clk */
266 DRA7XX_CORE_IOPAD(0x3780, (PIN_INPUT_PULLUP | MUX_MODE0)) /* mmc3_cmd.mmc3_cmd */ 266 DRA7XX_CORE_IOPAD(0x3780, (PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0)) /* mmc3_cmd.mmc3_cmd */
267 DRA7XX_CORE_IOPAD(0x3784, (PIN_INPUT_PULLUP | MUX_MODE0)) /* mmc3_dat0.mmc3_dat0 */ 267 DRA7XX_CORE_IOPAD(0x3784, (PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0)) /* mmc3_dat0.mmc3_dat0 */
268 DRA7XX_CORE_IOPAD(0x3788, (PIN_INPUT_PULLUP | MUX_MODE0)) /* mmc3_dat1.mmc3_dat1 */ 268 DRA7XX_CORE_IOPAD(0x3788, (PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0)) /* mmc3_dat1.mmc3_dat1 */
269 DRA7XX_CORE_IOPAD(0x378c, (PIN_INPUT_PULLUP | MUX_MODE0)) /* mmc3_dat2.mmc3_dat2 */ 269 DRA7XX_CORE_IOPAD(0x378c, (PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0)) /* mmc3_dat2.mmc3_dat2 */
270 DRA7XX_CORE_IOPAD(0x3790, (PIN_INPUT_PULLUP | MUX_MODE0)) /* mmc3_dat3.mmc3_dat3 */ 270 DRA7XX_CORE_IOPAD(0x3790, (PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0)) /* mmc3_dat3.mmc3_dat3 */
271 >; 271 >;
272 }; 272 };
273 273
diff --git a/arch/arm/boot/dts/imx6ul-14x14-evk.dtsi b/arch/arm/boot/dts/imx6ul-14x14-evk.dtsi
index cbe61b61a212..c2a9dd57e56a 100644
--- a/arch/arm/boot/dts/imx6ul-14x14-evk.dtsi
+++ b/arch/arm/boot/dts/imx6ul-14x14-evk.dtsi
@@ -112,7 +112,7 @@
112}; 112};
113 113
114&i2c2 { 114&i2c2 {
115 clock_frequency = <100000>; 115 clock-frequency = <100000>;
116 pinctrl-names = "default"; 116 pinctrl-names = "default";
117 pinctrl-0 = <&pinctrl_i2c2>; 117 pinctrl-0 = <&pinctrl_i2c2>;
118 status = "okay"; 118 status = "okay";
diff --git a/arch/arm/boot/dts/imx6ul-geam.dts b/arch/arm/boot/dts/imx6ul-geam.dts
index 21ddd359d3ed..9f63706383a7 100644
--- a/arch/arm/boot/dts/imx6ul-geam.dts
+++ b/arch/arm/boot/dts/imx6ul-geam.dts
@@ -156,7 +156,7 @@
156}; 156};
157 157
158&i2c2 { 158&i2c2 {
159 clock_frequency = <100000>; 159 clock-frequency = <100000>;
160 pinctrl-names = "default"; 160 pinctrl-names = "default";
161 pinctrl-0 = <&pinctrl_i2c2>; 161 pinctrl-0 = <&pinctrl_i2c2>;
162 status = "okay"; 162 status = "okay";
diff --git a/arch/arm/boot/dts/imx6ul-isiot.dtsi b/arch/arm/boot/dts/imx6ul-isiot.dtsi
index b26d4f57c655..cc9adce638f5 100644
--- a/arch/arm/boot/dts/imx6ul-isiot.dtsi
+++ b/arch/arm/boot/dts/imx6ul-isiot.dtsi
@@ -148,7 +148,7 @@
148}; 148};
149 149
150&i2c2 { 150&i2c2 {
151 clock_frequency = <100000>; 151 clock-frequency = <100000>;
152 pinctrl-names = "default"; 152 pinctrl-names = "default";
153 pinctrl-0 = <&pinctrl_i2c2>; 153 pinctrl-0 = <&pinctrl_i2c2>;
154 status = "okay"; 154 status = "okay";
diff --git a/arch/arm/boot/dts/imx6ul-pico-hobbit.dts b/arch/arm/boot/dts/imx6ul-pico-hobbit.dts
index 39eeeddac39e..09f7ffa9ad8c 100644
--- a/arch/arm/boot/dts/imx6ul-pico-hobbit.dts
+++ b/arch/arm/boot/dts/imx6ul-pico-hobbit.dts
@@ -43,7 +43,7 @@
43}; 43};
44 44
45&i2c2 { 45&i2c2 {
46 clock_frequency = <100000>; 46 clock-frequency = <100000>;
47 pinctrl-names = "default"; 47 pinctrl-names = "default";
48 pinctrl-0 = <&pinctrl_i2c2>; 48 pinctrl-0 = <&pinctrl_i2c2>;
49 status = "okay"; 49 status = "okay";
diff --git a/arch/arm/boot/dts/imx6ul-pico-pi.dts b/arch/arm/boot/dts/imx6ul-pico-pi.dts
index de07357b27fc..6cd7d5877d20 100644
--- a/arch/arm/boot/dts/imx6ul-pico-pi.dts
+++ b/arch/arm/boot/dts/imx6ul-pico-pi.dts
@@ -43,7 +43,7 @@
43}; 43};
44 44
45&i2c2 { 45&i2c2 {
46 clock_frequency = <100000>; 46 clock-frequency = <100000>;
47 pinctrl-names = "default"; 47 pinctrl-names = "default";
48 pinctrl-0 = <&pinctrl_i2c2>; 48 pinctrl-0 = <&pinctrl_i2c2>;
49 status = "okay"; 49 status = "okay";
@@ -58,7 +58,7 @@
58}; 58};
59 59
60&i2c3 { 60&i2c3 {
61 clock_frequency = <100000>; 61 clock-frequency = <100000>;
62 pinctrl-names = "default"; 62 pinctrl-names = "default";
63 pinctrl-0 = <&pinctrl_i2c3>; 63 pinctrl-0 = <&pinctrl_i2c3>;
64 status = "okay"; 64 status = "okay";
diff --git a/arch/arm/boot/dts/imx7ulp.dtsi b/arch/arm/boot/dts/imx7ulp.dtsi
index 992747a57442..56907bb4b329 100644
--- a/arch/arm/boot/dts/imx7ulp.dtsi
+++ b/arch/arm/boot/dts/imx7ulp.dtsi
@@ -186,7 +186,7 @@
186 reg = <0x40330200 0x200>; 186 reg = <0x40330200 0x200>;
187 }; 187 };
188 188
189 usbphy1: usb-phy@0x40350000 { 189 usbphy1: usb-phy@40350000 {
190 compatible = "fsl,imx7ulp-usbphy", "fsl,imx6ul-usbphy"; 190 compatible = "fsl,imx7ulp-usbphy", "fsl,imx6ul-usbphy";
191 reg = <0x40350000 0x1000>; 191 reg = <0x40350000 0x1000>;
192 interrupts = <GIC_SPI 39 IRQ_TYPE_LEVEL_HIGH>; 192 interrupts = <GIC_SPI 39 IRQ_TYPE_LEVEL_HIGH>;
diff --git a/arch/arm/boot/dts/vf610-bk4.dts b/arch/arm/boot/dts/vf610-bk4.dts
index 3fa0cbe456db..0f3870d3b099 100644
--- a/arch/arm/boot/dts/vf610-bk4.dts
+++ b/arch/arm/boot/dts/vf610-bk4.dts
@@ -246,13 +246,13 @@
246 reg = <0>; 246 reg = <0>;
247 }; 247 };
248 248
249 n25q128a13_2: flash@1 { 249 n25q128a13_2: flash@2 {
250 compatible = "n25q128a13", "jedec,spi-nor"; 250 compatible = "n25q128a13", "jedec,spi-nor";
251 #address-cells = <1>; 251 #address-cells = <1>;
252 #size-cells = <1>; 252 #size-cells = <1>;
253 spi-max-frequency = <66000000>; 253 spi-max-frequency = <66000000>;
254 spi-rx-bus-width = <2>; 254 spi-rx-bus-width = <2>;
255 reg = <1>; 255 reg = <2>;
256 }; 256 };
257}; 257};
258 258
diff --git a/arch/arm/configs/u8500_defconfig b/arch/arm/configs/u8500_defconfig
index e6b98b6eb88d..822cddfbf1af 100644
--- a/arch/arm/configs/u8500_defconfig
+++ b/arch/arm/configs/u8500_defconfig
@@ -4,17 +4,9 @@ CONFIG_NO_HZ_IDLE=y
4CONFIG_HIGH_RES_TIMERS=y 4CONFIG_HIGH_RES_TIMERS=y
5CONFIG_BLK_DEV_INITRD=y 5CONFIG_BLK_DEV_INITRD=y
6CONFIG_KALLSYMS_ALL=y 6CONFIG_KALLSYMS_ALL=y
7CONFIG_MODULES=y
8CONFIG_MODULE_UNLOAD=y
9# CONFIG_BLK_DEV_BSG is not set
10CONFIG_PARTITION_ADVANCED=y
11CONFIG_ARCH_U8500=y 7CONFIG_ARCH_U8500=y
12CONFIG_MACH_HREFV60=y
13CONFIG_MACH_SNOWBALL=y
14CONFIG_SMP=y 8CONFIG_SMP=y
15CONFIG_NR_CPUS=2 9CONFIG_NR_CPUS=2
16CONFIG_PREEMPT=y
17CONFIG_AEABI=y
18CONFIG_HIGHMEM=y 10CONFIG_HIGHMEM=y
19CONFIG_ARM_APPENDED_DTB=y 11CONFIG_ARM_APPENDED_DTB=y
20CONFIG_ARM_ATAG_DTB_COMPAT=y 12CONFIG_ARM_ATAG_DTB_COMPAT=y
@@ -25,6 +17,11 @@ CONFIG_CPU_IDLE=y
25CONFIG_ARM_U8500_CPUIDLE=y 17CONFIG_ARM_U8500_CPUIDLE=y
26CONFIG_VFP=y 18CONFIG_VFP=y
27CONFIG_NEON=y 19CONFIG_NEON=y
20CONFIG_MODULES=y
21CONFIG_MODULE_UNLOAD=y
22# CONFIG_BLK_DEV_BSG is not set
23CONFIG_PARTITION_ADVANCED=y
24CONFIG_CMA=y
28CONFIG_NET=y 25CONFIG_NET=y
29CONFIG_PACKET=y 26CONFIG_PACKET=y
30CONFIG_UNIX=y 27CONFIG_UNIX=y
@@ -47,7 +44,6 @@ CONFIG_SMSC911X=y
47CONFIG_SMSC_PHY=y 44CONFIG_SMSC_PHY=y
48CONFIG_CW1200=y 45CONFIG_CW1200=y
49CONFIG_CW1200_WLAN_SDIO=y 46CONFIG_CW1200_WLAN_SDIO=y
50# CONFIG_INPUT_MOUSEDEV_PSAUX is not set
51CONFIG_INPUT_EVDEV=y 47CONFIG_INPUT_EVDEV=y
52# CONFIG_KEYBOARD_ATKBD is not set 48# CONFIG_KEYBOARD_ATKBD is not set
53CONFIG_KEYBOARD_GPIO=y 49CONFIG_KEYBOARD_GPIO=y
@@ -63,7 +59,6 @@ CONFIG_RMI4_CORE=y
63CONFIG_RMI4_I2C=y 59CONFIG_RMI4_I2C=y
64CONFIG_RMI4_F11=y 60CONFIG_RMI4_F11=y
65# CONFIG_SERIO is not set 61# CONFIG_SERIO is not set
66CONFIG_VT_HW_CONSOLE_BINDING=y
67# CONFIG_LEGACY_PTYS is not set 62# CONFIG_LEGACY_PTYS is not set
68CONFIG_SERIAL_AMBA_PL011=y 63CONFIG_SERIAL_AMBA_PL011=y
69CONFIG_SERIAL_AMBA_PL011_CONSOLE=y 64CONFIG_SERIAL_AMBA_PL011_CONSOLE=y
@@ -72,6 +67,7 @@ CONFIG_SPI=y
72CONFIG_SPI_PL022=y 67CONFIG_SPI_PL022=y
73CONFIG_GPIO_STMPE=y 68CONFIG_GPIO_STMPE=y
74CONFIG_GPIO_TC3589X=y 69CONFIG_GPIO_TC3589X=y
70CONFIG_SENSORS_IIO_HWMON=y
75CONFIG_THERMAL=y 71CONFIG_THERMAL=y
76CONFIG_CPU_THERMAL=y 72CONFIG_CPU_THERMAL=y
77CONFIG_WATCHDOG=y 73CONFIG_WATCHDOG=y
@@ -79,6 +75,13 @@ CONFIG_MFD_STMPE=y
79CONFIG_MFD_TC3589X=y 75CONFIG_MFD_TC3589X=y
80CONFIG_REGULATOR_AB8500=y 76CONFIG_REGULATOR_AB8500=y
81CONFIG_REGULATOR_GPIO=y 77CONFIG_REGULATOR_GPIO=y
78CONFIG_DRM=y
79CONFIG_DRM_PANEL_SAMSUNG_S6D16D0=y
80CONFIG_DRM_LIMA=y
81CONFIG_DRM_MCDE=y
82CONFIG_BACKLIGHT_CLASS_DEVICE=y
83CONFIG_BACKLIGHT_GENERIC=m
84CONFIG_LOGO=y
82CONFIG_SOUND=y 85CONFIG_SOUND=y
83CONFIG_SND=y 86CONFIG_SND=y
84CONFIG_SND_SOC=y 87CONFIG_SND_SOC=y
@@ -87,6 +90,7 @@ CONFIG_SND_SOC_UX500_MACH_MOP500=y
87CONFIG_USB=y 90CONFIG_USB=y
88CONFIG_USB_MUSB_HDRC=y 91CONFIG_USB_MUSB_HDRC=y
89CONFIG_USB_MUSB_UX500=y 92CONFIG_USB_MUSB_UX500=y
93CONFIG_MUSB_PIO_ONLY=y
90CONFIG_AB8500_USB=y 94CONFIG_AB8500_USB=y
91CONFIG_USB_GADGET=y 95CONFIG_USB_GADGET=y
92CONFIG_USB_ETH=m 96CONFIG_USB_ETH=m
@@ -103,6 +107,7 @@ CONFIG_RTC_DRV_AB8500=y
103CONFIG_RTC_DRV_PL031=y 107CONFIG_RTC_DRV_PL031=y
104CONFIG_DMADEVICES=y 108CONFIG_DMADEVICES=y
105CONFIG_STE_DMA40=y 109CONFIG_STE_DMA40=y
110CONFIG_HWSPINLOCK=y
106CONFIG_HSEM_U8500=y 111CONFIG_HSEM_U8500=y
107CONFIG_IIO=y 112CONFIG_IIO=y
108CONFIG_IIO_SW_TRIGGER=y 113CONFIG_IIO_SW_TRIGGER=y
@@ -126,20 +131,19 @@ CONFIG_NFS_FS=y
126CONFIG_ROOT_NFS=y 131CONFIG_ROOT_NFS=y
127CONFIG_NLS_CODEPAGE_437=y 132CONFIG_NLS_CODEPAGE_437=y
128CONFIG_NLS_ISO8859_1=y 133CONFIG_NLS_ISO8859_1=y
134CONFIG_CRYPTO_DEV_UX500=y
135CONFIG_CRYPTO_DEV_UX500_CRYP=y
136CONFIG_CRYPTO_DEV_UX500_HASH=y
137CONFIG_CRYPTO_DEV_UX500_DEBUG=y
129CONFIG_PRINTK_TIME=y 138CONFIG_PRINTK_TIME=y
130CONFIG_DEBUG_INFO=y 139CONFIG_DEBUG_INFO=y
131CONFIG_DEBUG_FS=y 140CONFIG_DEBUG_FS=y
132CONFIG_MAGIC_SYSRQ=y 141CONFIG_MAGIC_SYSRQ=y
133CONFIG_DEBUG_KERNEL=y 142CONFIG_DEBUG_KERNEL=y
134# CONFIG_SCHED_DEBUG is not set 143# CONFIG_SCHED_DEBUG is not set
135# CONFIG_DEBUG_PREEMPT is not set
136# CONFIG_FTRACE is not set 144# CONFIG_FTRACE is not set
137CONFIG_DEBUG_USER=y 145CONFIG_DEBUG_USER=y
138CONFIG_CORESIGHT=y 146CONFIG_CORESIGHT=y
139CONFIG_CORESIGHT_SINK_TPIU=y 147CONFIG_CORESIGHT_SINK_TPIU=y
140CONFIG_CORESIGHT_SINK_ETBV10=y 148CONFIG_CORESIGHT_SINK_ETBV10=y
141CONFIG_CORESIGHT_SOURCE_ETM3X=y 149CONFIG_CORESIGHT_SOURCE_ETM3X=y
142CONFIG_CRYPTO_DEV_UX500=y
143CONFIG_CRYPTO_DEV_UX500_CRYP=y
144CONFIG_CRYPTO_DEV_UX500_HASH=y
145CONFIG_CRYPTO_DEV_UX500_DEBUG=y
diff --git a/arch/arm/include/asm/dma-mapping.h b/arch/arm/include/asm/dma-mapping.h
index 7e0486ad1318..dba9355e2484 100644
--- a/arch/arm/include/asm/dma-mapping.h
+++ b/arch/arm/include/asm/dma-mapping.h
@@ -18,7 +18,9 @@ extern const struct dma_map_ops arm_coherent_dma_ops;
18 18
19static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus) 19static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
20{ 20{
21 return IS_ENABLED(CONFIG_MMU) ? &arm_dma_ops : NULL; 21 if (IS_ENABLED(CONFIG_MMU) && !IS_ENABLED(CONFIG_ARM_LPAE))
22 return &arm_dma_ops;
23 return NULL;
22} 24}
23 25
24#ifdef __arch_page_to_dma 26#ifdef __arch_page_to_dma
diff --git a/arch/arm/kernel/hw_breakpoint.c b/arch/arm/kernel/hw_breakpoint.c
index af8b8e15f589..b0c195e3a06d 100644
--- a/arch/arm/kernel/hw_breakpoint.c
+++ b/arch/arm/kernel/hw_breakpoint.c
@@ -544,6 +544,7 @@ static int arch_build_bp_info(struct perf_event *bp,
544 if ((hw->ctrl.type != ARM_BREAKPOINT_EXECUTE) 544 if ((hw->ctrl.type != ARM_BREAKPOINT_EXECUTE)
545 && max_watchpoint_len >= 8) 545 && max_watchpoint_len >= 8)
546 break; 546 break;
547 /* Else, fall through */
547 default: 548 default:
548 return -EINVAL; 549 return -EINVAL;
549 } 550 }
@@ -608,10 +609,12 @@ int hw_breakpoint_arch_parse(struct perf_event *bp,
608 /* Allow halfword watchpoints and breakpoints. */ 609 /* Allow halfword watchpoints and breakpoints. */
609 if (hw->ctrl.len == ARM_BREAKPOINT_LEN_2) 610 if (hw->ctrl.len == ARM_BREAKPOINT_LEN_2)
610 break; 611 break;
612 /* Else, fall through */
611 case 3: 613 case 3:
612 /* Allow single byte watchpoint. */ 614 /* Allow single byte watchpoint. */
613 if (hw->ctrl.len == ARM_BREAKPOINT_LEN_1) 615 if (hw->ctrl.len == ARM_BREAKPOINT_LEN_1)
614 break; 616 break;
617 /* Else, fall through */
615 default: 618 default:
616 ret = -EINVAL; 619 ret = -EINVAL;
617 goto out; 620 goto out;
@@ -861,6 +864,7 @@ static int hw_breakpoint_pending(unsigned long addr, unsigned int fsr,
861 break; 864 break;
862 case ARM_ENTRY_ASYNC_WATCHPOINT: 865 case ARM_ENTRY_ASYNC_WATCHPOINT:
863 WARN(1, "Asynchronous watchpoint exception taken. Debugging results may be unreliable\n"); 866 WARN(1, "Asynchronous watchpoint exception taken. Debugging results may be unreliable\n");
867 /* Fall through */
864 case ARM_ENTRY_SYNC_WATCHPOINT: 868 case ARM_ENTRY_SYNC_WATCHPOINT:
865 watchpoint_handler(addr, fsr, regs); 869 watchpoint_handler(addr, fsr, regs);
866 break; 870 break;
@@ -909,6 +913,7 @@ static bool core_has_os_save_restore(void)
909 ARM_DBG_READ(c1, c1, 4, oslsr); 913 ARM_DBG_READ(c1, c1, 4, oslsr);
910 if (oslsr & ARM_OSLSR_OSLM0) 914 if (oslsr & ARM_OSLSR_OSLM0)
911 return true; 915 return true;
916 /* Else, fall through */
912 default: 917 default:
913 return false; 918 return false;
914 } 919 }
diff --git a/arch/arm/kernel/signal.c b/arch/arm/kernel/signal.c
index 09f6fdd41974..ab2568996ddb 100644
--- a/arch/arm/kernel/signal.c
+++ b/arch/arm/kernel/signal.c
@@ -596,6 +596,7 @@ static int do_signal(struct pt_regs *regs, int syscall)
596 switch (retval) { 596 switch (retval) {
597 case -ERESTART_RESTARTBLOCK: 597 case -ERESTART_RESTARTBLOCK:
598 restart -= 2; 598 restart -= 2;
599 /* Fall through */
599 case -ERESTARTNOHAND: 600 case -ERESTARTNOHAND:
600 case -ERESTARTSYS: 601 case -ERESTARTSYS:
601 case -ERESTARTNOINTR: 602 case -ERESTARTNOINTR:
diff --git a/arch/arm/kvm/coproc.c b/arch/arm/kvm/coproc.c
index d2806bcff8bb..07745ee022a1 100644
--- a/arch/arm/kvm/coproc.c
+++ b/arch/arm/kvm/coproc.c
@@ -651,13 +651,22 @@ int kvm_handle_cp14_64(struct kvm_vcpu *vcpu, struct kvm_run *run)
651} 651}
652 652
653static void reset_coproc_regs(struct kvm_vcpu *vcpu, 653static void reset_coproc_regs(struct kvm_vcpu *vcpu,
654 const struct coproc_reg *table, size_t num) 654 const struct coproc_reg *table, size_t num,
655 unsigned long *bmap)
655{ 656{
656 unsigned long i; 657 unsigned long i;
657 658
658 for (i = 0; i < num; i++) 659 for (i = 0; i < num; i++)
659 if (table[i].reset) 660 if (table[i].reset) {
661 int reg = table[i].reg;
662
660 table[i].reset(vcpu, &table[i]); 663 table[i].reset(vcpu, &table[i]);
664 if (reg > 0 && reg < NR_CP15_REGS) {
665 set_bit(reg, bmap);
666 if (table[i].is_64bit)
667 set_bit(reg + 1, bmap);
668 }
669 }
661} 670}
662 671
663static struct coproc_params decode_32bit_hsr(struct kvm_vcpu *vcpu) 672static struct coproc_params decode_32bit_hsr(struct kvm_vcpu *vcpu)
@@ -1432,17 +1441,15 @@ void kvm_reset_coprocs(struct kvm_vcpu *vcpu)
1432{ 1441{
1433 size_t num; 1442 size_t num;
1434 const struct coproc_reg *table; 1443 const struct coproc_reg *table;
1435 1444 DECLARE_BITMAP(bmap, NR_CP15_REGS) = { 0, };
1436 /* Catch someone adding a register without putting in reset entry. */
1437 memset(vcpu->arch.ctxt.cp15, 0x42, sizeof(vcpu->arch.ctxt.cp15));
1438 1445
1439 /* Generic chip reset first (so target could override). */ 1446 /* Generic chip reset first (so target could override). */
1440 reset_coproc_regs(vcpu, cp15_regs, ARRAY_SIZE(cp15_regs)); 1447 reset_coproc_regs(vcpu, cp15_regs, ARRAY_SIZE(cp15_regs), bmap);
1441 1448
1442 table = get_target_table(vcpu->arch.target, &num); 1449 table = get_target_table(vcpu->arch.target, &num);
1443 reset_coproc_regs(vcpu, table, num); 1450 reset_coproc_regs(vcpu, table, num, bmap);
1444 1451
1445 for (num = 1; num < NR_CP15_REGS; num++) 1452 for (num = 1; num < NR_CP15_REGS; num++)
1446 WARN(vcpu_cp15(vcpu, num) == 0x42424242, 1453 WARN(!test_bit(num, bmap),
1447 "Didn't reset vcpu_cp15(vcpu, %zi)", num); 1454 "Didn't reset vcpu_cp15(vcpu, %zi)", num);
1448} 1455}
diff --git a/arch/arm/lib/backtrace.S b/arch/arm/lib/backtrace.S
index 1d5210eb4776..582925238d65 100644
--- a/arch/arm/lib/backtrace.S
+++ b/arch/arm/lib/backtrace.S
@@ -66,7 +66,7 @@ for_each_frame: tst frame, mask @ Check for address exceptions
66 66
671003: ldr r2, [sv_pc, #-4] @ if stmfd sp!, {args} exists, 671003: ldr r2, [sv_pc, #-4] @ if stmfd sp!, {args} exists,
68 ldr r3, .Ldsi+4 @ adjust saved 'pc' back one 68 ldr r3, .Ldsi+4 @ adjust saved 'pc' back one
69 teq r3, r2, lsr #10 @ instruction 69 teq r3, r2, lsr #11 @ instruction
70 subne r0, sv_pc, #4 @ allow for mov 70 subne r0, sv_pc, #4 @ allow for mov
71 subeq r0, sv_pc, #8 @ allow for mov + stmia 71 subeq r0, sv_pc, #8 @ allow for mov + stmia
72 72
diff --git a/arch/arm/mach-davinci/sleep.S b/arch/arm/mach-davinci/sleep.S
index 05d03f09ff54..71262dcdbca3 100644
--- a/arch/arm/mach-davinci/sleep.S
+++ b/arch/arm/mach-davinci/sleep.S
@@ -24,6 +24,7 @@
24#define DEEPSLEEP_SLEEPENABLE_BIT BIT(31) 24#define DEEPSLEEP_SLEEPENABLE_BIT BIT(31)
25 25
26 .text 26 .text
27 .arch armv5te
27/* 28/*
28 * Move DaVinci into deep sleep state 29 * Move DaVinci into deep sleep state
29 * 30 *
diff --git a/arch/arm/mach-ep93xx/crunch.c b/arch/arm/mach-ep93xx/crunch.c
index 1c9a4be8b503..1c05c5bf7e5c 100644
--- a/arch/arm/mach-ep93xx/crunch.c
+++ b/arch/arm/mach-ep93xx/crunch.c
@@ -49,6 +49,7 @@ static int crunch_do(struct notifier_block *self, unsigned long cmd, void *t)
49 * FALLTHROUGH: Ensure we don't try to overwrite our newly 49 * FALLTHROUGH: Ensure we don't try to overwrite our newly
50 * initialised state information on the first fault. 50 * initialised state information on the first fault.
51 */ 51 */
52 /* Fall through */
52 53
53 case THREAD_NOTIFY_EXIT: 54 case THREAD_NOTIFY_EXIT:
54 crunch_task_release(thread); 55 crunch_task_release(thread);
diff --git a/arch/arm/mach-netx/Kconfig b/arch/arm/mach-netx/Kconfig
deleted file mode 100644
index 1e5d9c870784..000000000000
--- a/arch/arm/mach-netx/Kconfig
+++ /dev/null
@@ -1,22 +0,0 @@
1# SPDX-License-Identifier: GPL-2.0-only
2menu "NetX Implementations"
3 depends on ARCH_NETX
4
5config MACH_NXDKN
6 bool "Enable Hilscher nxdkn Eval Board support"
7 help
8 Board support for the Hilscher NetX Eval Board
9
10config MACH_NXDB500
11 bool "Enable Hilscher nxdb500 Eval Board support"
12 select ARM_AMBA
13 help
14 Board support for the Hilscher nxdb500 Eval Board
15
16config MACH_NXEB500HMI
17 bool "Enable Hilscher nxeb500hmi Eval Board support"
18 select ARM_AMBA
19 help
20 Board support for the Hilscher nxeb500hmi Eval Board
21
22endmenu
diff --git a/arch/arm/mach-netx/Makefile b/arch/arm/mach-netx/Makefile
deleted file mode 100644
index 44ea83f7d9c2..000000000000
--- a/arch/arm/mach-netx/Makefile
+++ /dev/null
@@ -1,13 +0,0 @@
1# SPDX-License-Identifier: GPL-2.0-only
2#
3# Makefile for the linux kernel.
4#
5
6# Object file lists.
7
8obj-y += time.o generic.o pfifo.o xc.o
9
10# Specific board support
11obj-$(CONFIG_MACH_NXDKN) += nxdkn.o
12obj-$(CONFIG_MACH_NXDB500) += nxdb500.o fb.o
13obj-$(CONFIG_MACH_NXEB500HMI) += nxeb500hmi.o fb.o
diff --git a/arch/arm/mach-netx/Makefile.boot b/arch/arm/mach-netx/Makefile.boot
deleted file mode 100644
index 2eb23c0cb6b0..000000000000
--- a/arch/arm/mach-netx/Makefile.boot
+++ /dev/null
@@ -1,3 +0,0 @@
1# SPDX-License-Identifier: GPL-2.0-only
2 zreladdr-y += 0x80008000
3
diff --git a/arch/arm/mach-netx/fb.c b/arch/arm/mach-netx/fb.c
deleted file mode 100644
index 2dc80db07390..000000000000
--- a/arch/arm/mach-netx/fb.c
+++ /dev/null
@@ -1,65 +0,0 @@
1// SPDX-License-Identifier: GPL-2.0-only
2/*
3 * arch/arm/mach-netx/fb.c
4 *
5 * Copyright (c) 2005 Sascha Hauer <s.hauer@pengutronix.de>, Pengutronix
6 */
7
8#include <linux/device.h>
9#include <linux/init.h>
10#include <linux/dma-mapping.h>
11#include <linux/amba/bus.h>
12#include <linux/amba/clcd.h>
13#include <linux/err.h>
14#include <linux/gfp.h>
15
16#include <asm/irq.h>
17
18#include <mach/netx-regs.h>
19#include <mach/hardware.h>
20
21static struct clcd_panel *netx_panel;
22
23void netx_clcd_enable(struct clcd_fb *fb)
24{
25}
26
27int netx_clcd_setup(struct clcd_fb *fb)
28{
29 dma_addr_t dma;
30
31 fb->panel = netx_panel;
32
33 fb->fb.screen_base = dma_alloc_wc(&fb->dev->dev, 1024 * 1024, &dma,
34 GFP_KERNEL);
35 if (!fb->fb.screen_base) {
36 printk(KERN_ERR "CLCD: unable to map framebuffer\n");
37 return -ENOMEM;
38 }
39
40 fb->fb.fix.smem_start = dma;
41 fb->fb.fix.smem_len = 1024*1024;
42
43 return 0;
44}
45
46int netx_clcd_mmap(struct clcd_fb *fb, struct vm_area_struct *vma)
47{
48 return dma_mmap_wc(&fb->dev->dev, vma, fb->fb.screen_base,
49 fb->fb.fix.smem_start, fb->fb.fix.smem_len);
50}
51
52void netx_clcd_remove(struct clcd_fb *fb)
53{
54 dma_free_wc(&fb->dev->dev, fb->fb.fix.smem_len, fb->fb.screen_base,
55 fb->fb.fix.smem_start);
56}
57
58static AMBA_AHB_DEVICE(fb, "fb", 0, 0x00104000, { NETX_IRQ_LCD }, NULL);
59
60int netx_fb_init(struct clcd_board *board, struct clcd_panel *panel)
61{
62 netx_panel = panel;
63 fb_device.dev.platform_data = board;
64 return amba_device_register(&fb_device, &iomem_resource);
65}
diff --git a/arch/arm/mach-netx/fb.h b/arch/arm/mach-netx/fb.h
deleted file mode 100644
index 5cdc01fc3c86..000000000000
--- a/arch/arm/mach-netx/fb.h
+++ /dev/null
@@ -1,12 +0,0 @@
1/* SPDX-License-Identifier: GPL-2.0-only */
2/*
3 * arch/arm/mach-netx/fb.h
4 *
5 * Copyright (c) 2005 Sascha Hauer <s.hauer@pengutronix.de>, Pengutronix
6 */
7
8void netx_clcd_enable(struct clcd_fb *fb);
9int netx_clcd_setup(struct clcd_fb *fb);
10int netx_clcd_mmap(struct clcd_fb *fb, struct vm_area_struct *vma);
11void netx_clcd_remove(struct clcd_fb *fb);
12int netx_fb_init(struct clcd_board *board, struct clcd_panel *panel);
diff --git a/arch/arm/mach-netx/generic.c b/arch/arm/mach-netx/generic.c
deleted file mode 100644
index 88881fd45e9f..000000000000
--- a/arch/arm/mach-netx/generic.c
+++ /dev/null
@@ -1,182 +0,0 @@
1// SPDX-License-Identifier: GPL-2.0-only
2/*
3 * arch/arm/mach-netx/generic.c
4 *
5 * Copyright (C) 2005 Sascha Hauer <s.hauer@pengutronix.de>, Pengutronix
6 */
7
8#include <linux/device.h>
9#include <linux/init.h>
10#include <linux/kernel.h>
11#include <linux/module.h>
12#include <linux/platform_device.h>
13#include <linux/io.h>
14#include <linux/irqchip/arm-vic.h>
15#include <linux/reboot.h>
16#include <mach/hardware.h>
17#include <asm/mach/map.h>
18#include <mach/netx-regs.h>
19#include <asm/mach/irq.h>
20
21static struct map_desc netx_io_desc[] __initdata = {
22 {
23 .virtual = NETX_IO_VIRT,
24 .pfn = __phys_to_pfn(NETX_IO_PHYS),
25 .length = NETX_IO_SIZE,
26 .type = MT_DEVICE
27 }
28};
29
30void __init netx_map_io(void)
31{
32 iotable_init(netx_io_desc, ARRAY_SIZE(netx_io_desc));
33}
34
35static struct resource netx_rtc_resources[] = {
36 [0] = {
37 .start = 0x00101200,
38 .end = 0x00101220,
39 .flags = IORESOURCE_MEM,
40 },
41};
42
43static struct platform_device netx_rtc_device = {
44 .name = "netx-rtc",
45 .id = 0,
46 .num_resources = ARRAY_SIZE(netx_rtc_resources),
47 .resource = netx_rtc_resources,
48};
49
50static struct platform_device *devices[] __initdata = {
51 &netx_rtc_device,
52};
53
54#if 0
55#define DEBUG_IRQ(fmt...) printk(fmt)
56#else
57#define DEBUG_IRQ(fmt...) while (0) {}
58#endif
59
60static void netx_hif_demux_handler(struct irq_desc *desc)
61{
62 unsigned int irq = NETX_IRQ_HIF_CHAINED(0);
63 unsigned int stat;
64
65 stat = ((readl(NETX_DPMAS_INT_EN) &
66 readl(NETX_DPMAS_INT_STAT)) >> 24) & 0x1f;
67
68 while (stat) {
69 if (stat & 1) {
70 DEBUG_IRQ("handling irq %d\n", irq);
71 generic_handle_irq(irq);
72 }
73 irq++;
74 stat >>= 1;
75 }
76}
77
78static int
79netx_hif_irq_type(struct irq_data *d, unsigned int type)
80{
81 unsigned int val, irq;
82
83 val = readl(NETX_DPMAS_IF_CONF1);
84
85 irq = d->irq - NETX_IRQ_HIF_CHAINED(0);
86
87 if (type & IRQ_TYPE_EDGE_RISING) {
88 DEBUG_IRQ("rising edges\n");
89 val |= (1 << 26) << irq;
90 }
91 if (type & IRQ_TYPE_EDGE_FALLING) {
92 DEBUG_IRQ("falling edges\n");
93 val &= ~((1 << 26) << irq);
94 }
95 if (type & IRQ_TYPE_LEVEL_LOW) {
96 DEBUG_IRQ("low level\n");
97 val &= ~((1 << 26) << irq);
98 }
99 if (type & IRQ_TYPE_LEVEL_HIGH) {
100 DEBUG_IRQ("high level\n");
101 val |= (1 << 26) << irq;
102 }
103
104 writel(val, NETX_DPMAS_IF_CONF1);
105
106 return 0;
107}
108
109static void
110netx_hif_ack_irq(struct irq_data *d)
111{
112 unsigned int val, irq;
113
114 irq = d->irq - NETX_IRQ_HIF_CHAINED(0);
115 writel((1 << 24) << irq, NETX_DPMAS_INT_STAT);
116
117 val = readl(NETX_DPMAS_INT_EN);
118 val &= ~((1 << 24) << irq);
119 writel(val, NETX_DPMAS_INT_EN);
120
121 DEBUG_IRQ("%s: irq %d\n", __func__, d->irq);
122}
123
124static void
125netx_hif_mask_irq(struct irq_data *d)
126{
127 unsigned int val, irq;
128
129 irq = d->irq - NETX_IRQ_HIF_CHAINED(0);
130 val = readl(NETX_DPMAS_INT_EN);
131 val &= ~((1 << 24) << irq);
132 writel(val, NETX_DPMAS_INT_EN);
133 DEBUG_IRQ("%s: irq %d\n", __func__, d->irq);
134}
135
136static void
137netx_hif_unmask_irq(struct irq_data *d)
138{
139 unsigned int val, irq;
140
141 irq = d->irq - NETX_IRQ_HIF_CHAINED(0);
142 val = readl(NETX_DPMAS_INT_EN);
143 val |= (1 << 24) << irq;
144 writel(val, NETX_DPMAS_INT_EN);
145 DEBUG_IRQ("%s: irq %d\n", __func__, d->irq);
146}
147
148static struct irq_chip netx_hif_chip = {
149 .irq_ack = netx_hif_ack_irq,
150 .irq_mask = netx_hif_mask_irq,
151 .irq_unmask = netx_hif_unmask_irq,
152 .irq_set_type = netx_hif_irq_type,
153};
154
155void __init netx_init_irq(void)
156{
157 int irq;
158
159 vic_init(io_p2v(NETX_PA_VIC), NETX_IRQ_VIC_START, ~0, 0);
160
161 for (irq = NETX_IRQ_HIF_CHAINED(0); irq <= NETX_IRQ_HIF_LAST; irq++) {
162 irq_set_chip_and_handler(irq, &netx_hif_chip,
163 handle_level_irq);
164 irq_clear_status_flags(irq, IRQ_NOREQUEST);
165 }
166
167 writel(NETX_DPMAS_INT_EN_GLB_EN, NETX_DPMAS_INT_EN);
168 irq_set_chained_handler(NETX_IRQ_HIF, netx_hif_demux_handler);
169}
170
171static int __init netx_init(void)
172{
173 return platform_add_devices(devices, ARRAY_SIZE(devices));
174}
175
176subsys_initcall(netx_init);
177
178void netx_restart(enum reboot_mode mode, const char *cmd)
179{
180 writel(NETX_SYSTEM_RES_CR_FIRMW_RES_EN | NETX_SYSTEM_RES_CR_FIRMW_RES,
181 NETX_SYSTEM_RES_CR);
182}
diff --git a/arch/arm/mach-netx/generic.h b/arch/arm/mach-netx/generic.h
deleted file mode 100644
index 223e304574a5..000000000000
--- a/arch/arm/mach-netx/generic.h
+++ /dev/null
@@ -1,14 +0,0 @@
1/* SPDX-License-Identifier: GPL-2.0-only */
2/*
3 * arch/arm/mach-netx/generic.h
4 *
5 * Copyright (c) 2005 Sascha Hauer <s.hauer@pengutronix.de>, Pengutronix
6 */
7
8#include <linux/reboot.h>
9
10extern void __init netx_map_io(void);
11extern void __init netx_init_irq(void);
12extern void netx_restart(enum reboot_mode, const char *);
13
14extern void netx_timer_init(void);
diff --git a/arch/arm/mach-netx/include/mach/hardware.h b/arch/arm/mach-netx/include/mach/hardware.h
deleted file mode 100644
index 84253993d1e0..000000000000
--- a/arch/arm/mach-netx/include/mach/hardware.h
+++ /dev/null
@@ -1,27 +0,0 @@
1/* SPDX-License-Identifier: GPL-2.0-only */
2/*
3 * arch/arm/mach-netx/include/mach/hardware.h
4 *
5 * Copyright (C) 2005 Sascha Hauer <s.hauer@pengutronix.de>, Pengutronix
6 */
7#ifndef __ASM_ARCH_HARDWARE_H
8#define __ASM_ARCH_HARDWARE_H
9
10#define NETX_IO_PHYS 0x00100000
11#define NETX_IO_VIRT 0xe0000000
12#define NETX_IO_SIZE 0x00100000
13
14#define SRAM_INTERNAL_PHYS_0 0x00000
15#define SRAM_INTERNAL_PHYS_1 0x08000
16#define SRAM_INTERNAL_PHYS_2 0x10000
17#define SRAM_INTERNAL_PHYS_3 0x18000
18#define SRAM_INTERNAL_PHYS(no) ((no) * 0x8000)
19
20#define XPEC_MEM_SIZE 0x4000
21#define XMAC_MEM_SIZE 0x1000
22#define SRAM_MEM_SIZE 0x8000
23
24#define io_p2v(x) IOMEM((x) - NETX_IO_PHYS + NETX_IO_VIRT)
25#define io_v2p(x) ((x) - NETX_IO_VIRT + NETX_IO_PHYS)
26
27#endif
diff --git a/arch/arm/mach-netx/include/mach/irqs.h b/arch/arm/mach-netx/include/mach/irqs.h
deleted file mode 100644
index 540c92104fe8..000000000000
--- a/arch/arm/mach-netx/include/mach/irqs.h
+++ /dev/null
@@ -1,58 +0,0 @@
1/* SPDX-License-Identifier: GPL-2.0-only */
2/*
3 * arch/arm/mach-netx/include/mach/irqs.h
4 *
5 * Copyright (C) 2005 Sascha Hauer <s.hauer@pengutronix.de>, Pengutronix
6 */
7
8#define NETX_IRQ_VIC_START 64
9#define NETX_IRQ_SOFTINT (NETX_IRQ_VIC_START + 0)
10#define NETX_IRQ_TIMER0 (NETX_IRQ_VIC_START + 1)
11#define NETX_IRQ_TIMER1 (NETX_IRQ_VIC_START + 2)
12#define NETX_IRQ_TIMER2 (NETX_IRQ_VIC_START + 3)
13#define NETX_IRQ_SYSTIME_NS (NETX_IRQ_VIC_START + 4)
14#define NETX_IRQ_SYSTIME_S (NETX_IRQ_VIC_START + 5)
15#define NETX_IRQ_GPIO_15 (NETX_IRQ_VIC_START + 6)
16#define NETX_IRQ_WATCHDOG (NETX_IRQ_VIC_START + 7)
17#define NETX_IRQ_UART0 (NETX_IRQ_VIC_START + 8)
18#define NETX_IRQ_UART1 (NETX_IRQ_VIC_START + 9)
19#define NETX_IRQ_UART2 (NETX_IRQ_VIC_START + 10)
20#define NETX_IRQ_USB (NETX_IRQ_VIC_START + 11)
21#define NETX_IRQ_SPI (NETX_IRQ_VIC_START + 12)
22#define NETX_IRQ_I2C (NETX_IRQ_VIC_START + 13)
23#define NETX_IRQ_LCD (NETX_IRQ_VIC_START + 14)
24#define NETX_IRQ_HIF (NETX_IRQ_VIC_START + 15)
25#define NETX_IRQ_GPIO_0_14 (NETX_IRQ_VIC_START + 16)
26#define NETX_IRQ_XPEC0 (NETX_IRQ_VIC_START + 17)
27#define NETX_IRQ_XPEC1 (NETX_IRQ_VIC_START + 18)
28#define NETX_IRQ_XPEC2 (NETX_IRQ_VIC_START + 19)
29#define NETX_IRQ_XPEC3 (NETX_IRQ_VIC_START + 20)
30#define NETX_IRQ_XPEC(no) (NETX_IRQ_VIC_START + 17 + (no))
31#define NETX_IRQ_MSYNC0 (NETX_IRQ_VIC_START + 21)
32#define NETX_IRQ_MSYNC1 (NETX_IRQ_VIC_START + 22)
33#define NETX_IRQ_MSYNC2 (NETX_IRQ_VIC_START + 23)
34#define NETX_IRQ_MSYNC3 (NETX_IRQ_VIC_START + 24)
35#define NETX_IRQ_IRQ_PHY (NETX_IRQ_VIC_START + 25)
36#define NETX_IRQ_ISO_AREA (NETX_IRQ_VIC_START + 26)
37/* int 27 is reserved */
38/* int 28 is reserved */
39#define NETX_IRQ_TIMER3 (NETX_IRQ_VIC_START + 29)
40#define NETX_IRQ_TIMER4 (NETX_IRQ_VIC_START + 30)
41/* int 31 is reserved */
42
43#define NETX_IRQS (NETX_IRQ_VIC_START + 32)
44
45/* for multiplexed irqs on gpio 0..14 */
46#define NETX_IRQ_GPIO(x) (NETX_IRQS + (x))
47#define NETX_IRQ_GPIO_LAST NETX_IRQ_GPIO(14)
48
49/* Host interface interrupts */
50#define NETX_IRQ_HIF_CHAINED(x) (NETX_IRQ_GPIO_LAST + 1 + (x))
51#define NETX_IRQ_HIF_PIO35 NETX_IRQ_HIF_CHAINED(0)
52#define NETX_IRQ_HIF_PIO36 NETX_IRQ_HIF_CHAINED(1)
53#define NETX_IRQ_HIF_PIO40 NETX_IRQ_HIF_CHAINED(2)
54#define NETX_IRQ_HIF_PIO47 NETX_IRQ_HIF_CHAINED(3)
55#define NETX_IRQ_HIF_PIO72 NETX_IRQ_HIF_CHAINED(4)
56#define NETX_IRQ_HIF_LAST NETX_IRQ_HIF_CHAINED(4)
57
58#define NR_IRQS (NETX_IRQ_HIF_LAST + 1)
diff --git a/arch/arm/mach-netx/include/mach/netx-regs.h b/arch/arm/mach-netx/include/mach/netx-regs.h
deleted file mode 100644
index 7c356a6ab80b..000000000000
--- a/arch/arm/mach-netx/include/mach/netx-regs.h
+++ /dev/null
@@ -1,420 +0,0 @@
1/* SPDX-License-Identifier: GPL-2.0-only */
2/*
3 * arch/arm/mach-netx/include/mach/netx-regs.h
4 *
5 * Copyright (c) 2005 Sascha Hauer <s.hauer@pengutronix.de>, Pengutronix
6 */
7
8#ifndef __ASM_ARCH_NETX_REGS_H
9#define __ASM_ARCH_NETX_REGS_H
10
11/* offsets relative to the beginning of the io space */
12#define NETX_OFS_SYSTEM 0x00000
13#define NETX_OFS_MEMCR 0x00100
14#define NETX_OFS_DPMAS 0x03000
15#define NETX_OFS_GPIO 0x00800
16#define NETX_OFS_PIO 0x00900
17#define NETX_OFS_UART0 0x00a00
18#define NETX_OFS_UART1 0x00a40
19#define NETX_OFS_UART2 0x00a80
20#define NETX_OF_MIIMU 0x00b00
21#define NETX_OFS_SPI 0x00c00
22#define NETX_OFS_I2C 0x00d00
23#define NETX_OFS_SYSTIME 0x01100
24#define NETX_OFS_RTC 0x01200
25#define NETX_OFS_EXTBUS 0x03600
26#define NETX_OFS_LCD 0x04000
27#define NETX_OFS_USB 0x20000
28#define NETX_OFS_XMAC0 0x60000
29#define NETX_OFS_XMAC1 0x61000
30#define NETX_OFS_XMAC2 0x62000
31#define NETX_OFS_XMAC3 0x63000
32#define NETX_OFS_XMAC(no) (0x60000 + (no) * 0x1000)
33#define NETX_OFS_PFIFO 0x64000
34#define NETX_OFS_XPEC0 0x70000
35#define NETX_OFS_XPEC1 0x74000
36#define NETX_OFS_XPEC2 0x78000
37#define NETX_OFS_XPEC3 0x7c000
38#define NETX_OFS_XPEC(no) (0x70000 + (no) * 0x4000)
39#define NETX_OFS_VIC 0xff000
40
41/* physical addresses */
42#define NETX_PA_SYSTEM (NETX_IO_PHYS + NETX_OFS_SYSTEM)
43#define NETX_PA_MEMCR (NETX_IO_PHYS + NETX_OFS_MEMCR)
44#define NETX_PA_DPMAS (NETX_IO_PHYS + NETX_OFS_DPMAS)
45#define NETX_PA_GPIO (NETX_IO_PHYS + NETX_OFS_GPIO)
46#define NETX_PA_PIO (NETX_IO_PHYS + NETX_OFS_PIO)
47#define NETX_PA_UART0 (NETX_IO_PHYS + NETX_OFS_UART0)
48#define NETX_PA_UART1 (NETX_IO_PHYS + NETX_OFS_UART1)
49#define NETX_PA_UART2 (NETX_IO_PHYS + NETX_OFS_UART2)
50#define NETX_PA_MIIMU (NETX_IO_PHYS + NETX_OF_MIIMU)
51#define NETX_PA_SPI (NETX_IO_PHYS + NETX_OFS_SPI)
52#define NETX_PA_I2C (NETX_IO_PHYS + NETX_OFS_I2C)
53#define NETX_PA_SYSTIME (NETX_IO_PHYS + NETX_OFS_SYSTIME)
54#define NETX_PA_RTC (NETX_IO_PHYS + NETX_OFS_RTC)
55#define NETX_PA_EXTBUS (NETX_IO_PHYS + NETX_OFS_EXTBUS)
56#define NETX_PA_LCD (NETX_IO_PHYS + NETX_OFS_LCD)
57#define NETX_PA_USB (NETX_IO_PHYS + NETX_OFS_USB)
58#define NETX_PA_XMAC0 (NETX_IO_PHYS + NETX_OFS_XMAC0)
59#define NETX_PA_XMAC1 (NETX_IO_PHYS + NETX_OFS_XMAC1)
60#define NETX_PA_XMAC2 (NETX_IO_PHYS + NETX_OFS_XMAC2)
61#define NETX_PA_XMAC3 (NETX_IO_PHYS + NETX_OFS_XMAC3)
62#define NETX_PA_XMAC(no) (NETX_IO_PHYS + NETX_OFS_XMAC(no))
63#define NETX_PA_PFIFO (NETX_IO_PHYS + NETX_OFS_PFIFO)
64#define NETX_PA_XPEC0 (NETX_IO_PHYS + NETX_OFS_XPEC0)
65#define NETX_PA_XPEC1 (NETX_IO_PHYS + NETX_OFS_XPEC1)
66#define NETX_PA_XPEC2 (NETX_IO_PHYS + NETX_OFS_XPEC2)
67#define NETX_PA_XPEC3 (NETX_IO_PHYS + NETX_OFS_XPEC3)
68#define NETX_PA_XPEC(no) (NETX_IO_PHYS + NETX_OFS_XPEC(no))
69#define NETX_PA_VIC (NETX_IO_PHYS + NETX_OFS_VIC)
70
71/* virtual addresses */
72#define NETX_VA_SYSTEM (NETX_IO_VIRT + NETX_OFS_SYSTEM)
73#define NETX_VA_MEMCR (NETX_IO_VIRT + NETX_OFS_MEMCR)
74#define NETX_VA_DPMAS (NETX_IO_VIRT + NETX_OFS_DPMAS)
75#define NETX_VA_GPIO (NETX_IO_VIRT + NETX_OFS_GPIO)
76#define NETX_VA_PIO (NETX_IO_VIRT + NETX_OFS_PIO)
77#define NETX_VA_UART0 (NETX_IO_VIRT + NETX_OFS_UART0)
78#define NETX_VA_UART1 (NETX_IO_VIRT + NETX_OFS_UART1)
79#define NETX_VA_UART2 (NETX_IO_VIRT + NETX_OFS_UART2)
80#define NETX_VA_MIIMU (NETX_IO_VIRT + NETX_OF_MIIMU)
81#define NETX_VA_SPI (NETX_IO_VIRT + NETX_OFS_SPI)
82#define NETX_VA_I2C (NETX_IO_VIRT + NETX_OFS_I2C)
83#define NETX_VA_SYSTIME (NETX_IO_VIRT + NETX_OFS_SYSTIME)
84#define NETX_VA_RTC (NETX_IO_VIRT + NETX_OFS_RTC)
85#define NETX_VA_EXTBUS (NETX_IO_VIRT + NETX_OFS_EXTBUS)
86#define NETX_VA_LCD (NETX_IO_VIRT + NETX_OFS_LCD)
87#define NETX_VA_USB (NETX_IO_VIRT + NETX_OFS_USB)
88#define NETX_VA_XMAC0 (NETX_IO_VIRT + NETX_OFS_XMAC0)
89#define NETX_VA_XMAC1 (NETX_IO_VIRT + NETX_OFS_XMAC1)
90#define NETX_VA_XMAC2 (NETX_IO_VIRT + NETX_OFS_XMAC2)
91#define NETX_VA_XMAC3 (NETX_IO_VIRT + NETX_OFS_XMAC3)
92#define NETX_VA_XMAC(no) (NETX_IO_VIRT + NETX_OFS_XMAC(no))
93#define NETX_VA_PFIFO (NETX_IO_VIRT + NETX_OFS_PFIFO)
94#define NETX_VA_XPEC0 (NETX_IO_VIRT + NETX_OFS_XPEC0)
95#define NETX_VA_XPEC1 (NETX_IO_VIRT + NETX_OFS_XPEC1)
96#define NETX_VA_XPEC2 (NETX_IO_VIRT + NETX_OFS_XPEC2)
97#define NETX_VA_XPEC3 (NETX_IO_VIRT + NETX_OFS_XPEC3)
98#define NETX_VA_XPEC(no) (NETX_IO_VIRT + NETX_OFS_XPEC(no))
99#define NETX_VA_VIC (NETX_IO_VIRT + NETX_OFS_VIC)
100
101/*********************************
102 * System functions *
103 *********************************/
104
105/* Registers */
106#define NETX_SYSTEM_REG(ofs) IOMEM(NETX_VA_SYSTEM + (ofs))
107#define NETX_SYSTEM_BOO_SR NETX_SYSTEM_REG(0x00)
108#define NETX_SYSTEM_IOC_CR NETX_SYSTEM_REG(0x04)
109#define NETX_SYSTEM_IOC_MR NETX_SYSTEM_REG(0x08)
110
111/* FIXME: Docs are not consistent */
112/* #define NETX_SYSTEM_RES_CR NETX_SYSTEM_REG(0x08) */
113#define NETX_SYSTEM_RES_CR NETX_SYSTEM_REG(0x0c)
114
115#define NETX_SYSTEM_PHY_CONTROL NETX_SYSTEM_REG(0x10)
116#define NETX_SYSTEM_REV NETX_SYSTEM_REG(0x34)
117#define NETX_SYSTEM_IOC_ACCESS_KEY NETX_SYSTEM_REG(0x70)
118#define NETX_SYSTEM_WDG_TR NETX_SYSTEM_REG(0x200)
119#define NETX_SYSTEM_WDG_CTR NETX_SYSTEM_REG(0x204)
120#define NETX_SYSTEM_WDG_IRQ_TIMEOUT NETX_SYSTEM_REG(0x208)
121#define NETX_SYSTEM_WDG_RES_TIMEOUT NETX_SYSTEM_REG(0x20c)
122
123/* Bits */
124#define NETX_SYSTEM_RES_CR_RSTIN (1<<0)
125#define NETX_SYSTEM_RES_CR_WDG_RES (1<<1)
126#define NETX_SYSTEM_RES_CR_HOST_RES (1<<2)
127#define NETX_SYSTEM_RES_CR_FIRMW_RES (1<<3)
128#define NETX_SYSTEM_RES_CR_XPEC0_RES (1<<4)
129#define NETX_SYSTEM_RES_CR_XPEC1_RES (1<<5)
130#define NETX_SYSTEM_RES_CR_XPEC2_RES (1<<6)
131#define NETX_SYSTEM_RES_CR_XPEC3_RES (1<<7)
132#define NETX_SYSTEM_RES_CR_DIS_XPEC0_RES (1<<16)
133#define NETX_SYSTEM_RES_CR_DIS_XPEC1_RES (1<<17)
134#define NETX_SYSTEM_RES_CR_DIS_XPEC2_RES (1<<18)
135#define NETX_SYSTEM_RES_CR_DIS_XPEC3_RES (1<<19)
136#define NETX_SYSTEM_RES_CR_FIRMW_FLG0 (1<<20)
137#define NETX_SYSTEM_RES_CR_FIRMW_FLG1 (1<<21)
138#define NETX_SYSTEM_RES_CR_FIRMW_FLG2 (1<<22)
139#define NETX_SYSTEM_RES_CR_FIRMW_FLG3 (1<<23)
140#define NETX_SYSTEM_RES_CR_FIRMW_RES_EN (1<<24)
141#define NETX_SYSTEM_RES_CR_RSTOUT (1<<25)
142#define NETX_SYSTEM_RES_CR_EN_RSTOUT (1<<26)
143
144#define PHY_CONTROL_RESET (1<<31)
145#define PHY_CONTROL_SIM_BYP (1<<30)
146#define PHY_CONTROL_CLK_XLATIN (1<<29)
147#define PHY_CONTROL_PHY1_EN (1<<21)
148#define PHY_CONTROL_PHY1_NP_MSG_CODE
149#define PHY_CONTROL_PHY1_AUTOMDIX (1<<17)
150#define PHY_CONTROL_PHY1_FIXMODE (1<<16)
151#define PHY_CONTROL_PHY1_MODE(mode) (((mode) & 0x7) << 13)
152#define PHY_CONTROL_PHY0_EN (1<<12)
153#define PHY_CONTROL_PHY0_NP_MSG_CODE
154#define PHY_CONTROL_PHY0_AUTOMDIX (1<<8)
155#define PHY_CONTROL_PHY0_FIXMODE (1<<7)
156#define PHY_CONTROL_PHY0_MODE(mode) (((mode) & 0x7) << 4)
157#define PHY_CONTROL_PHY_ADDRESS(adr) ((adr) & 0xf)
158
159#define PHY_MODE_10BASE_T_HALF 0
160#define PHY_MODE_10BASE_T_FULL 1
161#define PHY_MODE_100BASE_TX_FX_FULL 2
162#define PHY_MODE_100BASE_TX_FX_HALF 3
163#define PHY_MODE_100BASE_TX_HALF 4
164#define PHY_MODE_REPEATER 5
165#define PHY_MODE_POWER_DOWN 6
166#define PHY_MODE_ALL 7
167
168/* Bits */
169#define VECT_CNTL_ENABLE (1 << 5)
170
171/*******************************
172 * GPIO and timer module *
173 *******************************/
174
175/* Registers */
176#define NETX_GPIO_REG(ofs) IOMEM(NETX_VA_GPIO + (ofs))
177#define NETX_GPIO_CFG(gpio) NETX_GPIO_REG(0x0 + ((gpio)<<2))
178#define NETX_GPIO_THRESHOLD_CAPTURE(gpio) NETX_GPIO_REG(0x40 + ((gpio)<<2))
179#define NETX_GPIO_COUNTER_CTRL(counter) NETX_GPIO_REG(0x80 + ((counter)<<2))
180#define NETX_GPIO_COUNTER_MAX(counter) NETX_GPIO_REG(0x94 + ((counter)<<2))
181#define NETX_GPIO_COUNTER_CURRENT(counter) NETX_GPIO_REG(0xa8 + ((counter)<<2))
182#define NETX_GPIO_IRQ_ENABLE NETX_GPIO_REG(0xbc)
183#define NETX_GPIO_IRQ_DISABLE NETX_GPIO_REG(0xc0)
184#define NETX_GPIO_SYSTIME_NS_CMP NETX_GPIO_REG(0xc4)
185#define NETX_GPIO_LINE NETX_GPIO_REG(0xc8)
186#define NETX_GPIO_IRQ NETX_GPIO_REG(0xd0)
187
188/* Bits */
189#define NETX_GPIO_CFG_IOCFG_GP_INPUT (0x0)
190#define NETX_GPIO_CFG_IOCFG_GP_OUTPUT (0x1)
191#define NETX_GPIO_CFG_IOCFG_GP_UART (0x2)
192#define NETX_GPIO_CFG_INV (1<<2)
193#define NETX_GPIO_CFG_MODE_INPUT_READ (0<<3)
194#define NETX_GPIO_CFG_MODE_INPUT_CAPTURE_CONT_RISING (1<<3)
195#define NETX_GPIO_CFG_MODE_INPUT_CAPTURE_ONCE_RISING (2<<3)
196#define NETX_GPIO_CFG_MODE_INPUT_CAPTURE_HIGH_LEVEL (3<<3)
197#define NETX_GPIO_CFG_COUNT_REF_COUNTER0 (0<<5)
198#define NETX_GPIO_CFG_COUNT_REF_COUNTER1 (1<<5)
199#define NETX_GPIO_CFG_COUNT_REF_COUNTER2 (2<<5)
200#define NETX_GPIO_CFG_COUNT_REF_COUNTER3 (3<<5)
201#define NETX_GPIO_CFG_COUNT_REF_COUNTER4 (4<<5)
202#define NETX_GPIO_CFG_COUNT_REF_SYSTIME (7<<5)
203
204#define NETX_GPIO_COUNTER_CTRL_RUN (1<<0)
205#define NETX_GPIO_COUNTER_CTRL_SYM (1<<1)
206#define NETX_GPIO_COUNTER_CTRL_ONCE (1<<2)
207#define NETX_GPIO_COUNTER_CTRL_IRQ_EN (1<<3)
208#define NETX_GPIO_COUNTER_CTRL_CNT_EVENT (1<<4)
209#define NETX_GPIO_COUNTER_CTRL_RST_EN (1<<5)
210#define NETX_GPIO_COUNTER_CTRL_SEL_EVENT (1<<6)
211#define NETX_GPIO_COUNTER_CTRL_GPIO_REF /* FIXME */
212
213#define GPIO_BIT(gpio) (1<<(gpio))
214#define COUNTER_BIT(counter) ((1<<16)<<(counter))
215
216/*******************************
217 * PIO *
218 *******************************/
219
220/* Registers */
221#define NETX_PIO_REG(ofs) IOMEM(NETX_VA_PIO + (ofs))
222#define NETX_PIO_INPIO NETX_PIO_REG(0x0)
223#define NETX_PIO_OUTPIO NETX_PIO_REG(0x4)
224#define NETX_PIO_OEPIO NETX_PIO_REG(0x8)
225
226/*******************************
227 * MII Unit *
228 *******************************/
229
230/* Registers */
231#define NETX_MIIMU IOMEM(NETX_VA_MIIMU)
232
233/* Bits */
234#define MIIMU_SNRDY (1<<0)
235#define MIIMU_PREAMBLE (1<<1)
236#define MIIMU_OPMODE_WRITE (1<<2)
237#define MIIMU_MDC_PERIOD (1<<3)
238#define MIIMU_PHY_NRES (1<<4)
239#define MIIMU_RTA (1<<5)
240#define MIIMU_REGADDR(adr) (((adr) & 0x1f) << 6)
241#define MIIMU_PHYADDR(adr) (((adr) & 0x1f) << 11)
242#define MIIMU_DATA(data) (((data) & 0xffff) << 16)
243
244/*******************************
245 * xmac / xpec *
246 *******************************/
247
248/* XPEC register offsets relative to NETX_VA_XPEC(no) */
249#define NETX_XPEC_R0_OFS 0x00
250#define NETX_XPEC_R1_OFS 0x04
251#define NETX_XPEC_R2_OFS 0x08
252#define NETX_XPEC_R3_OFS 0x0c
253#define NETX_XPEC_R4_OFS 0x10
254#define NETX_XPEC_R5_OFS 0x14
255#define NETX_XPEC_R6_OFS 0x18
256#define NETX_XPEC_R7_OFS 0x1c
257#define NETX_XPEC_RANGE01_OFS 0x20
258#define NETX_XPEC_RANGE23_OFS 0x24
259#define NETX_XPEC_RANGE45_OFS 0x28
260#define NETX_XPEC_RANGE67_OFS 0x2c
261#define NETX_XPEC_PC_OFS 0x48
262#define NETX_XPEC_TIMER_OFS(timer) (0x30 + ((timer)<<2))
263#define NETX_XPEC_IRQ_OFS 0x8c
264#define NETX_XPEC_SYSTIME_NS_OFS 0x90
265#define NETX_XPEC_FIFO_DATA_OFS 0x94
266#define NETX_XPEC_SYSTIME_S_OFS 0x98
267#define NETX_XPEC_ADC_OFS 0x9c
268#define NETX_XPEC_URX_COUNT_OFS 0x40
269#define NETX_XPEC_UTX_COUNT_OFS 0x44
270#define NETX_XPEC_PC_OFS 0x48
271#define NETX_XPEC_ZERO_OFS 0x4c
272#define NETX_XPEC_STATCFG_OFS 0x50
273#define NETX_XPEC_EC_MASKA_OFS 0x54
274#define NETX_XPEC_EC_MASKB_OFS 0x58
275#define NETX_XPEC_EC_MASK0_OFS 0x5c
276#define NETX_XPEC_EC_MASK8_OFS 0x7c
277#define NETX_XPEC_EC_MASK9_OFS 0x80
278#define NETX_XPEC_XPU_HOLD_PC_OFS 0x100
279#define NETX_XPEC_RAM_START_OFS 0x2000
280
281/* Bits */
282#define XPU_HOLD_PC (1<<0)
283
284/* XMAC register offsets relative to NETX_VA_XMAC(no) */
285#define NETX_XMAC_RPU_PROGRAM_START_OFS 0x000
286#define NETX_XMAC_RPU_PROGRAM_END_OFS 0x3ff
287#define NETX_XMAC_TPU_PROGRAM_START_OFS 0x400
288#define NETX_XMAC_TPU_PROGRAM_END_OFS 0x7ff
289#define NETX_XMAC_RPU_HOLD_PC_OFS 0xa00
290#define NETX_XMAC_TPU_HOLD_PC_OFS 0xa04
291#define NETX_XMAC_STATUS_SHARED0_OFS 0x840
292#define NETX_XMAC_CONFIG_SHARED0_OFS 0x844
293#define NETX_XMAC_STATUS_SHARED1_OFS 0x848
294#define NETX_XMAC_CONFIG_SHARED1_OFS 0x84c
295#define NETX_XMAC_STATUS_SHARED2_OFS 0x850
296#define NETX_XMAC_CONFIG_SHARED2_OFS 0x854
297#define NETX_XMAC_STATUS_SHARED3_OFS 0x858
298#define NETX_XMAC_CONFIG_SHARED3_OFS 0x85c
299
300#define RPU_HOLD_PC (1<<15)
301#define TPU_HOLD_PC (1<<15)
302
303/*******************************
304 * Pointer FIFO *
305 *******************************/
306
307/* Registers */
308#define NETX_PFIFO_REG(ofs) IOMEM(NETX_VA_PFIFO + (ofs))
309#define NETX_PFIFO_BASE(pfifo) NETX_PFIFO_REG(0x00 + ((pfifo)<<2))
310#define NETX_PFIFO_BORDER_BASE(pfifo) NETX_PFIFO_REG(0x80 + ((pfifo)<<2))
311#define NETX_PFIFO_RESET NETX_PFIFO_REG(0x100)
312#define NETX_PFIFO_FULL NETX_PFIFO_REG(0x104)
313#define NETX_PFIFO_EMPTY NETX_PFIFO_REG(0x108)
314#define NETX_PFIFO_OVEFLOW NETX_PFIFO_REG(0x10c)
315#define NETX_PFIFO_UNDERRUN NETX_PFIFO_REG(0x110)
316#define NETX_PFIFO_FILL_LEVEL(pfifo) NETX_PFIFO_REG(0x180 + ((pfifo)<<2))
317#define NETX_PFIFO_XPEC_ISR(xpec) NETX_PFIFO_REG(0x400 + ((xpec) << 2))
318
319
320/*******************************
321 * Memory Controller *
322 *******************************/
323
324/* Registers */
325#define NETX_MEMCR_REG(ofs) IOMEM(NETX_VA_MEMCR + (ofs))
326#define NETX_MEMCR_SRAM_CTRL(cs) NETX_MEMCR_REG(0x0 + 4 * (cs)) /* SRAM for CS 0..2 */
327#define NETX_MEMCR_SDRAM_CFG_CTRL NETX_MEMCR_REG(0x40)
328#define NETX_MEMCR_SDRAM_TIMING_CTRL NETX_MEMCR_REG(0x44)
329#define NETX_MEMCR_SDRAM_MODE NETX_MEMCR_REG(0x48)
330#define NETX_MEMCR_SDRAM_EXT_MODE NETX_MEMCR_REG(0x4c)
331#define NETX_MEMCR_PRIO_TIMESLOT_CTRL NETX_MEMCR_REG(0x80)
332#define NETX_MEMCR_PRIO_ACCESS_CTRL NETX_MEMCR_REG(0x84)
333
334/* Bits */
335#define NETX_MEMCR_SRAM_CTRL_WIDTHEXTMEM(x) (((x) & 0x3) << 24)
336#define NETX_MEMCR_SRAM_CTRL_WSPOSTPAUSEEXTMEM(x) (((x) & 0x3) << 16)
337#define NETX_MEMCR_SRAM_CTRL_WSPREPASEEXTMEM(x) (((x) & 0x3) << 8)
338#define NETX_MEMCR_SRAM_CTRL_WSEXTMEM(x) (((x) & 0x1f) << 0)
339
340
341/*******************************
342 * Dual Port Memory *
343 *******************************/
344
345/* Registers */
346#define NETX_DPMAS_REG(ofs) IOMEM(NETX_VA_DPMAS + (ofs))
347#define NETX_DPMAS_SYS_STAT NETX_DPMAS_REG(0x4d8)
348#define NETX_DPMAS_INT_STAT NETX_DPMAS_REG(0x4e0)
349#define NETX_DPMAS_INT_EN NETX_DPMAS_REG(0x4f0)
350#define NETX_DPMAS_IF_CONF0 NETX_DPMAS_REG(0x608)
351#define NETX_DPMAS_IF_CONF1 NETX_DPMAS_REG(0x60c)
352#define NETX_DPMAS_EXT_CONFIG(cs) NETX_DPMAS_REG(0x610 + 4 * (cs))
353#define NETX_DPMAS_IO_MODE0 NETX_DPMAS_REG(0x620) /* I/O 32..63 */
354#define NETX_DPMAS_DRV_EN0 NETX_DPMAS_REG(0x624)
355#define NETX_DPMAS_DATA0 NETX_DPMAS_REG(0x628)
356#define NETX_DPMAS_IO_MODE1 NETX_DPMAS_REG(0x630) /* I/O 64..84 */
357#define NETX_DPMAS_DRV_EN1 NETX_DPMAS_REG(0x634)
358#define NETX_DPMAS_DATA1 NETX_DPMAS_REG(0x638)
359
360/* Bits */
361#define NETX_DPMAS_INT_EN_GLB_EN (1<<31)
362#define NETX_DPMAS_INT_EN_MEM_LCK (1<<30)
363#define NETX_DPMAS_INT_EN_WDG (1<<29)
364#define NETX_DPMAS_INT_EN_PIO72 (1<<28)
365#define NETX_DPMAS_INT_EN_PIO47 (1<<27)
366#define NETX_DPMAS_INT_EN_PIO40 (1<<26)
367#define NETX_DPMAS_INT_EN_PIO36 (1<<25)
368#define NETX_DPMAS_INT_EN_PIO35 (1<<24)
369
370#define NETX_DPMAS_IF_CONF0_HIF_DISABLED (0<<28)
371#define NETX_DPMAS_IF_CONF0_HIF_EXT_BUS (1<<28)
372#define NETX_DPMAS_IF_CONF0_HIF_UP_8BIT (2<<28)
373#define NETX_DPMAS_IF_CONF0_HIF_UP_16BIT (3<<28)
374#define NETX_DPMAS_IF_CONF0_HIF_IO (4<<28)
375#define NETX_DPMAS_IF_CONF0_WAIT_DRV_PP (1<<14)
376#define NETX_DPMAS_IF_CONF0_WAIT_DRV_OD (2<<14)
377#define NETX_DPMAS_IF_CONF0_WAIT_DRV_TRI (3<<14)
378
379#define NETX_DPMAS_IF_CONF1_IRQ_POL_PIO35 (1<<26)
380#define NETX_DPMAS_IF_CONF1_IRQ_POL_PIO36 (1<<27)
381#define NETX_DPMAS_IF_CONF1_IRQ_POL_PIO40 (1<<28)
382#define NETX_DPMAS_IF_CONF1_IRQ_POL_PIO47 (1<<29)
383#define NETX_DPMAS_IF_CONF1_IRQ_POL_PIO72 (1<<30)
384
385#define NETX_EXT_CONFIG_TALEWIDTH(x) (((x) & 0x7) << 29)
386#define NETX_EXT_CONFIG_TADRHOLD(x) (((x) & 0x7) << 26)
387#define NETX_EXT_CONFIG_TCSON(x) (((x) & 0x7) << 23)
388#define NETX_EXT_CONFIG_TRDON(x) (((x) & 0x7) << 20)
389#define NETX_EXT_CONFIG_TWRON(x) (((x) & 0x7) << 17)
390#define NETX_EXT_CONFIG_TWROFF(x) (((x) & 0x1f) << 12)
391#define NETX_EXT_CONFIG_TRDWRCYC(x) (((x) & 0x1f) << 7)
392#define NETX_EXT_CONFIG_WAIT_POL (1<<6)
393#define NETX_EXT_CONFIG_WAIT_EN (1<<5)
394#define NETX_EXT_CONFIG_NRD_MODE (1<<4)
395#define NETX_EXT_CONFIG_DS_MODE (1<<3)
396#define NETX_EXT_CONFIG_NWR_MODE (1<<2)
397#define NETX_EXT_CONFIG_16BIT (1<<1)
398#define NETX_EXT_CONFIG_CS_ENABLE (1<<0)
399
400#define NETX_DPMAS_IO_MODE0_WRL (1<<13)
401#define NETX_DPMAS_IO_MODE0_WAIT (1<<14)
402#define NETX_DPMAS_IO_MODE0_READY (1<<15)
403#define NETX_DPMAS_IO_MODE0_CS0 (1<<19)
404#define NETX_DPMAS_IO_MODE0_EXTRD (1<<20)
405
406#define NETX_DPMAS_IO_MODE1_CS2 (1<<15)
407#define NETX_DPMAS_IO_MODE1_CS1 (1<<16)
408#define NETX_DPMAS_IO_MODE1_SAMPLE_NPOR (0<<30)
409#define NETX_DPMAS_IO_MODE1_SAMPLE_100MHZ (1<<30)
410#define NETX_DPMAS_IO_MODE1_SAMPLE_NPIO36 (2<<30)
411#define NETX_DPMAS_IO_MODE1_SAMPLE_PIO36 (3<<30)
412
413/*******************************
414 * I2C *
415 *******************************/
416#define NETX_I2C_REG(ofs) IOMEM(NETX_VA_I2C, (ofs))
417#define NETX_I2C_CTRL NETX_I2C_REG(0x0)
418#define NETX_I2C_DATA NETX_I2C_REG(0x4)
419
420#endif /* __ASM_ARCH_NETX_REGS_H */
diff --git a/arch/arm/mach-netx/include/mach/pfifo.h b/arch/arm/mach-netx/include/mach/pfifo.h
deleted file mode 100644
index de23180bc937..000000000000
--- a/arch/arm/mach-netx/include/mach/pfifo.h
+++ /dev/null
@@ -1,42 +0,0 @@
1/* SPDX-License-Identifier: GPL-2.0-only */
2/*
3 * arch/arm/mach-netx/include/mach/pfifo.h
4 *
5 * Copyright (c) 2005 Sascha Hauer <s.hauer@pengutronix.de>, Pengutronix
6 */
7
8
9#ifndef ASM_ARCH_PFIFO_H
10#define ASM_ARCH_PFIFO_H
11
12static inline int pfifo_push(int no, unsigned int pointer)
13{
14 writel(pointer, NETX_PFIFO_BASE(no));
15 return 0;
16}
17
18static inline unsigned int pfifo_pop(int no)
19{
20 return readl(NETX_PFIFO_BASE(no));
21}
22
23static inline int pfifo_fill_level(int no)
24{
25
26 return readl(NETX_PFIFO_FILL_LEVEL(no));
27}
28
29static inline int pfifo_full(int no)
30{
31 return readl(NETX_PFIFO_FULL) & (1<<no) ? 1 : 0;
32}
33
34static inline int pfifo_empty(int no)
35{
36 return readl(NETX_PFIFO_EMPTY) & (1<<no) ? 1 : 0;
37}
38
39int pfifo_request(unsigned int pfifo_mask);
40void pfifo_free(unsigned int pfifo_mask);
41
42#endif /* ASM_ARCH_PFIFO_H */
diff --git a/arch/arm/mach-netx/include/mach/uncompress.h b/arch/arm/mach-netx/include/mach/uncompress.h
deleted file mode 100644
index edc1ac997eab..000000000000
--- a/arch/arm/mach-netx/include/mach/uncompress.h
+++ /dev/null
@@ -1,63 +0,0 @@
1/* SPDX-License-Identifier: GPL-2.0-only */
2/*
3 * arch/arm/mach-netx/include/mach/uncompress.h
4 *
5 * Copyright (C) 2005 Sascha Hauer <s.hauer@pengutronix.de>, Pengutronix
6 */
7
8/*
9 * The following code assumes the serial port has already been
10 * initialized by the bootloader. We search for the first enabled
11 * port in the most probable order. If you didn't setup a port in
12 * your bootloader then nothing will appear (which might be desired).
13 *
14 * This does not append a newline
15 */
16
17#define REG(x) (*(volatile unsigned long *)(x))
18
19#define UART1_BASE 0x100a00
20#define UART2_BASE 0x100a80
21
22#define UART_DR 0x0
23
24#define UART_CR 0x14
25#define CR_UART_EN (1<<0)
26
27#define UART_FR 0x18
28#define FR_BUSY (1<<3)
29#define FR_TXFF (1<<5)
30
31static inline void putc(char c)
32{
33 unsigned long base;
34
35 if (REG(UART1_BASE + UART_CR) & CR_UART_EN)
36 base = UART1_BASE;
37 else if (REG(UART2_BASE + UART_CR) & CR_UART_EN)
38 base = UART2_BASE;
39 else
40 return;
41
42 while (REG(base + UART_FR) & FR_TXFF);
43 REG(base + UART_DR) = c;
44}
45
46static inline void flush(void)
47{
48 unsigned long base;
49
50 if (REG(UART1_BASE + UART_CR) & CR_UART_EN)
51 base = UART1_BASE;
52 else if (REG(UART2_BASE + UART_CR) & CR_UART_EN)
53 base = UART2_BASE;
54 else
55 return;
56
57 while (REG(base + UART_FR) & FR_BUSY);
58}
59
60/*
61 * nothing to do
62 */
63#define arch_decomp_setup()
diff --git a/arch/arm/mach-netx/include/mach/xc.h b/arch/arm/mach-netx/include/mach/xc.h
deleted file mode 100644
index 465d5e250ab8..000000000000
--- a/arch/arm/mach-netx/include/mach/xc.h
+++ /dev/null
@@ -1,30 +0,0 @@
1/* SPDX-License-Identifier: GPL-2.0-only */
2/*
3 * arch/arm/mach-netx/include/mach/xc.h
4 *
5 * Copyright (C) 2005 Sascha Hauer <s.hauer@pengutronix.de>, Pengutronix
6 */
7
8#ifndef __ASM_ARCH_XC_H
9#define __ASM_ARCH_XC_H
10
11struct xc {
12 int no;
13 unsigned int type;
14 unsigned int version;
15 void __iomem *xpec_base;
16 void __iomem *xmac_base;
17 void __iomem *sram_base;
18 int irq;
19 struct device *dev;
20};
21
22int xc_reset(struct xc *x);
23int xc_stop(struct xc* x);
24int xc_start(struct xc *x);
25int xc_running(struct xc *x);
26int xc_request_firmware(struct xc* x);
27struct xc* request_xc(int xcno, struct device *dev);
28void free_xc(struct xc *x);
29
30#endif /* __ASM_ARCH_XC_H */
diff --git a/arch/arm/mach-netx/nxdb500.c b/arch/arm/mach-netx/nxdb500.c
deleted file mode 100644
index ad5e6747b834..000000000000
--- a/arch/arm/mach-netx/nxdb500.c
+++ /dev/null
@@ -1,197 +0,0 @@
1// SPDX-License-Identifier: GPL-2.0-only
2/*
3 * arch/arm/mach-netx/nxdb500.c
4 *
5 * Copyright (c) 2005 Sascha Hauer <s.hauer@pengutronix.de>, Pengutronix
6 */
7
8#include <linux/dma-mapping.h>
9#include <linux/init.h>
10#include <linux/interrupt.h>
11#include <linux/mtd/plat-ram.h>
12#include <linux/platform_device.h>
13#include <linux/amba/bus.h>
14#include <linux/amba/clcd.h>
15
16#include <mach/hardware.h>
17#include <asm/mach-types.h>
18#include <asm/mach/arch.h>
19#include <mach/netx-regs.h>
20#include <linux/platform_data/eth-netx.h>
21
22#include "generic.h"
23#include "fb.h"
24
25static struct clcd_panel qvga = {
26 .mode = {
27 .name = "QVGA",
28 .refresh = 60,
29 .xres = 240,
30 .yres = 320,
31 .pixclock = 187617,
32 .left_margin = 6,
33 .right_margin = 26,
34 .upper_margin = 0,
35 .lower_margin = 6,
36 .hsync_len = 6,
37 .vsync_len = 1,
38 .sync = 0,
39 .vmode = FB_VMODE_NONINTERLACED,
40 },
41 .width = -1,
42 .height = -1,
43 .tim2 = 16,
44 .cntl = CNTL_LCDTFT | CNTL_BGR,
45 .bpp = 16,
46 .grayscale = 0,
47};
48
49static inline int nxdb500_check(struct clcd_fb *fb, struct fb_var_screeninfo *var)
50{
51 var->green.length = 5;
52 var->green.msb_right = 0;
53
54 return clcdfb_check(fb, var);
55}
56
57static int nxdb500_clcd_setup(struct clcd_fb *fb)
58{
59 unsigned int val;
60
61 fb->fb.var.green.length = 5;
62 fb->fb.var.green.msb_right = 0;
63
64 /* enable asic control */
65 val = readl(NETX_SYSTEM_IOC_ACCESS_KEY);
66 writel(val, NETX_SYSTEM_IOC_ACCESS_KEY);
67
68 writel(3, NETX_SYSTEM_IOC_CR);
69
70 val = readl(NETX_PIO_OUTPIO);
71 writel(val | 1, NETX_PIO_OUTPIO);
72
73 val = readl(NETX_PIO_OEPIO);
74 writel(val | 1, NETX_PIO_OEPIO);
75 return netx_clcd_setup(fb);
76}
77
78static struct clcd_board clcd_data = {
79 .name = "netX",
80 .check = nxdb500_check,
81 .decode = clcdfb_decode,
82 .enable = netx_clcd_enable,
83 .setup = nxdb500_clcd_setup,
84 .mmap = netx_clcd_mmap,
85 .remove = netx_clcd_remove,
86};
87
88static struct netxeth_platform_data eth0_platform_data = {
89 .xcno = 0,
90};
91
92static struct platform_device netx_eth0_device = {
93 .name = "netx-eth",
94 .id = 0,
95 .num_resources = 0,
96 .resource = NULL,
97 .dev = {
98 .platform_data = &eth0_platform_data,
99 }
100};
101
102static struct netxeth_platform_data eth1_platform_data = {
103 .xcno = 1,
104};
105
106static struct platform_device netx_eth1_device = {
107 .name = "netx-eth",
108 .id = 1,
109 .num_resources = 0,
110 .resource = NULL,
111 .dev = {
112 .platform_data = &eth1_platform_data,
113 }
114};
115
116static struct resource netx_uart0_resources[] = {
117 [0] = {
118 .start = 0x00100A00,
119 .end = 0x00100A3F,
120 .flags = IORESOURCE_MEM,
121 },
122 [1] = {
123 .start = (NETX_IRQ_UART0),
124 .end = (NETX_IRQ_UART0),
125 .flags = IORESOURCE_IRQ,
126 },
127};
128
129static struct platform_device netx_uart0_device = {
130 .name = "netx-uart",
131 .id = 0,
132 .num_resources = ARRAY_SIZE(netx_uart0_resources),
133 .resource = netx_uart0_resources,
134};
135
136static struct resource netx_uart1_resources[] = {
137 [0] = {
138 .start = 0x00100A40,
139 .end = 0x00100A7F,
140 .flags = IORESOURCE_MEM,
141 },
142 [1] = {
143 .start = (NETX_IRQ_UART1),
144 .end = (NETX_IRQ_UART1),
145 .flags = IORESOURCE_IRQ,
146 },
147};
148
149static struct platform_device netx_uart1_device = {
150 .name = "netx-uart",
151 .id = 1,
152 .num_resources = ARRAY_SIZE(netx_uart1_resources),
153 .resource = netx_uart1_resources,
154};
155
156static struct resource netx_uart2_resources[] = {
157 [0] = {
158 .start = 0x00100A80,
159 .end = 0x00100ABF,
160 .flags = IORESOURCE_MEM,
161 },
162 [1] = {
163 .start = (NETX_IRQ_UART2),
164 .end = (NETX_IRQ_UART2),
165 .flags = IORESOURCE_IRQ,
166 },
167};
168
169static struct platform_device netx_uart2_device = {
170 .name = "netx-uart",
171 .id = 2,
172 .num_resources = ARRAY_SIZE(netx_uart2_resources),
173 .resource = netx_uart2_resources,
174};
175
176static struct platform_device *devices[] __initdata = {
177 &netx_eth0_device,
178 &netx_eth1_device,
179 &netx_uart0_device,
180 &netx_uart1_device,
181 &netx_uart2_device,
182};
183
184static void __init nxdb500_init(void)
185{
186 netx_fb_init(&clcd_data, &qvga);
187 platform_add_devices(devices, ARRAY_SIZE(devices));
188}
189
190MACHINE_START(NXDB500, "Hilscher nxdb500")
191 .atag_offset = 0x100,
192 .map_io = netx_map_io,
193 .init_irq = netx_init_irq,
194 .init_time = netx_timer_init,
195 .init_machine = nxdb500_init,
196 .restart = netx_restart,
197MACHINE_END
diff --git a/arch/arm/mach-netx/nxdkn.c b/arch/arm/mach-netx/nxdkn.c
deleted file mode 100644
index 917381559628..000000000000
--- a/arch/arm/mach-netx/nxdkn.c
+++ /dev/null
@@ -1,90 +0,0 @@
1// SPDX-License-Identifier: GPL-2.0-only
2/*
3 * arch/arm/mach-netx/nxdkn.c
4 *
5 * Copyright (c) 2005 Sascha Hauer <s.hauer@pengutronix.de>, Pengutronix
6 */
7
8#include <linux/dma-mapping.h>
9#include <linux/init.h>
10#include <linux/interrupt.h>
11#include <linux/mtd/plat-ram.h>
12#include <linux/platform_device.h>
13#include <linux/amba/bus.h>
14#include <linux/amba/clcd.h>
15
16#include <mach/hardware.h>
17#include <asm/mach-types.h>
18#include <asm/mach/arch.h>
19#include <mach/netx-regs.h>
20#include <linux/platform_data/eth-netx.h>
21
22#include "generic.h"
23
24static struct netxeth_platform_data eth0_platform_data = {
25 .xcno = 0,
26};
27
28static struct platform_device nxdkn_eth0_device = {
29 .name = "netx-eth",
30 .id = 0,
31 .num_resources = 0,
32 .resource = NULL,
33 .dev = {
34 .platform_data = &eth0_platform_data,
35 }
36};
37
38static struct netxeth_platform_data eth1_platform_data = {
39 .xcno = 1,
40};
41
42static struct platform_device nxdkn_eth1_device = {
43 .name = "netx-eth",
44 .id = 1,
45 .num_resources = 0,
46 .resource = NULL,
47 .dev = {
48 .platform_data = &eth1_platform_data,
49 }
50};
51
52static struct resource netx_uart0_resources[] = {
53 [0] = {
54 .start = 0x00100A00,
55 .end = 0x00100A3F,
56 .flags = IORESOURCE_MEM,
57 },
58 [1] = {
59 .start = (NETX_IRQ_UART0),
60 .end = (NETX_IRQ_UART0),
61 .flags = IORESOURCE_IRQ,
62 },
63};
64
65static struct platform_device netx_uart0_device = {
66 .name = "netx-uart",
67 .id = 0,
68 .num_resources = ARRAY_SIZE(netx_uart0_resources),
69 .resource = netx_uart0_resources,
70};
71
72static struct platform_device *devices[] __initdata = {
73 &nxdkn_eth0_device,
74 &nxdkn_eth1_device,
75 &netx_uart0_device,
76};
77
78static void __init nxdkn_init(void)
79{
80 platform_add_devices(devices, ARRAY_SIZE(devices));
81}
82
83MACHINE_START(NXDKN, "Hilscher nxdkn")
84 .atag_offset = 0x100,
85 .map_io = netx_map_io,
86 .init_irq = netx_init_irq,
87 .init_time = netx_timer_init,
88 .init_machine = nxdkn_init,
89 .restart = netx_restart,
90MACHINE_END
diff --git a/arch/arm/mach-netx/nxeb500hmi.c b/arch/arm/mach-netx/nxeb500hmi.c
deleted file mode 100644
index aa0d5b2ca712..000000000000
--- a/arch/arm/mach-netx/nxeb500hmi.c
+++ /dev/null
@@ -1,174 +0,0 @@
1// SPDX-License-Identifier: GPL-2.0-only
2/*
3 * arch/arm/mach-netx/nxeb500hmi.c
4 *
5 * Copyright (c) 2005 Sascha Hauer <s.hauer@pengutronix.de>, Pengutronix
6 */
7
8#include <linux/dma-mapping.h>
9#include <linux/init.h>
10#include <linux/interrupt.h>
11#include <linux/mtd/plat-ram.h>
12#include <linux/platform_device.h>
13#include <linux/amba/bus.h>
14#include <linux/amba/clcd.h>
15
16#include <mach/hardware.h>
17#include <asm/mach-types.h>
18#include <asm/mach/arch.h>
19#include <mach/netx-regs.h>
20#include <linux/platform_data/eth-netx.h>
21
22#include "generic.h"
23#include "fb.h"
24
25static struct clcd_panel qvga = {
26 .mode = {
27 .name = "QVGA",
28 .refresh = 60,
29 .xres = 240,
30 .yres = 320,
31 .pixclock = 187617,
32 .left_margin = 6,
33 .right_margin = 26,
34 .upper_margin = 0,
35 .lower_margin = 6,
36 .hsync_len = 6,
37 .vsync_len = 1,
38 .sync = 0,
39 .vmode = FB_VMODE_NONINTERLACED,
40 },
41 .width = -1,
42 .height = -1,
43 .tim2 = 16,
44 .cntl = CNTL_LCDTFT | CNTL_BGR,
45 .bpp = 16,
46 .grayscale = 0,
47};
48
49static inline int nxeb500hmi_check(struct clcd_fb *fb, struct fb_var_screeninfo *var)
50{
51 var->green.length = 5;
52 var->green.msb_right = 0;
53
54 return clcdfb_check(fb, var);
55}
56
57static int nxeb500hmi_clcd_setup(struct clcd_fb *fb)
58{
59 unsigned int val;
60
61 fb->fb.var.green.length = 5;
62 fb->fb.var.green.msb_right = 0;
63
64 /* enable asic control */
65 val = readl(NETX_SYSTEM_IOC_ACCESS_KEY);
66 writel(val, NETX_SYSTEM_IOC_ACCESS_KEY);
67
68 writel(3, NETX_SYSTEM_IOC_CR);
69
70 /* GPIO 14 is used for display enable on newer boards */
71 writel(9, NETX_GPIO_CFG(14));
72
73 val = readl(NETX_PIO_OUTPIO);
74 writel(val | 1, NETX_PIO_OUTPIO);
75
76 val = readl(NETX_PIO_OEPIO);
77 writel(val | 1, NETX_PIO_OEPIO);
78 return netx_clcd_setup(fb);
79}
80
81static struct clcd_board clcd_data = {
82 .name = "netX",
83 .check = nxeb500hmi_check,
84 .decode = clcdfb_decode,
85 .enable = netx_clcd_enable,
86 .setup = nxeb500hmi_clcd_setup,
87 .mmap = netx_clcd_mmap,
88 .remove = netx_clcd_remove,
89};
90
91static struct netxeth_platform_data eth0_platform_data = {
92 .xcno = 0,
93};
94
95static struct platform_device netx_eth0_device = {
96 .name = "netx-eth",
97 .id = 0,
98 .num_resources = 0,
99 .resource = NULL,
100 .dev = {
101 .platform_data = &eth0_platform_data,
102 }
103};
104
105static struct netxeth_platform_data eth1_platform_data = {
106 .xcno = 1,
107};
108
109static struct platform_device netx_eth1_device = {
110 .name = "netx-eth",
111 .id = 1,
112 .num_resources = 0,
113 .resource = NULL,
114 .dev = {
115 .platform_data = &eth1_platform_data,
116 }
117};
118
119static struct resource netx_cf_resources[] = {
120 [0] = {
121 .start = 0x20000000,
122 .end = 0x25ffffff,
123 .flags = IORESOURCE_MEM | IORESOURCE_MEM_8AND16BIT,
124 },
125};
126
127static struct platform_device netx_cf_device = {
128 .name = "netx-cf",
129 .id = 0,
130 .resource = netx_cf_resources,
131 .num_resources = ARRAY_SIZE(netx_cf_resources),
132};
133
134static struct resource netx_uart0_resources[] = {
135 [0] = {
136 .start = 0x00100A00,
137 .end = 0x00100A3F,
138 .flags = IORESOURCE_MEM,
139 },
140 [1] = {
141 .start = (NETX_IRQ_UART0),
142 .end = (NETX_IRQ_UART0),
143 .flags = IORESOURCE_IRQ,
144 },
145};
146
147static struct platform_device netx_uart0_device = {
148 .name = "netx-uart",
149 .id = 0,
150 .num_resources = ARRAY_SIZE(netx_uart0_resources),
151 .resource = netx_uart0_resources,
152};
153
154static struct platform_device *devices[] __initdata = {
155 &netx_eth0_device,
156 &netx_eth1_device,
157 &netx_cf_device,
158 &netx_uart0_device,
159};
160
161static void __init nxeb500hmi_init(void)
162{
163 netx_fb_init(&clcd_data, &qvga);
164 platform_add_devices(devices, ARRAY_SIZE(devices));
165}
166
167MACHINE_START(NXEB500HMI, "Hilscher nxeb500hmi")
168 .atag_offset = 0x100,
169 .map_io = netx_map_io,
170 .init_irq = netx_init_irq,
171 .init_time = netx_timer_init,
172 .init_machine = nxeb500hmi_init,
173 .restart = netx_restart,
174MACHINE_END
diff --git a/arch/arm/mach-netx/pfifo.c b/arch/arm/mach-netx/pfifo.c
deleted file mode 100644
index 2e5cc777329f..000000000000
--- a/arch/arm/mach-netx/pfifo.c
+++ /dev/null
@@ -1,56 +0,0 @@
1// SPDX-License-Identifier: GPL-2.0-only
2/*
3 * arch/arm/mach-netx/pfifo.c
4 *
5 * Copyright (c) 2005 Sascha Hauer <s.hauer@pengutronix.de>, Pengutronix
6 */
7
8#include <linux/init.h>
9#include <linux/module.h>
10#include <linux/mutex.h>
11#include <linux/io.h>
12
13#include <mach/hardware.h>
14#include <mach/netx-regs.h>
15#include <mach/pfifo.h>
16
17static DEFINE_MUTEX(pfifo_lock);
18
19static unsigned int pfifo_used = 0;
20
21int pfifo_request(unsigned int pfifo_mask)
22{
23 int err = 0;
24 unsigned int val;
25
26 mutex_lock(&pfifo_lock);
27
28 if (pfifo_mask & pfifo_used) {
29 err = -EBUSY;
30 goto out;
31 }
32
33 pfifo_used |= pfifo_mask;
34
35 val = readl(NETX_PFIFO_RESET);
36 writel(val | pfifo_mask, NETX_PFIFO_RESET);
37 writel(val, NETX_PFIFO_RESET);
38
39out:
40 mutex_unlock(&pfifo_lock);
41 return err;
42}
43
44void pfifo_free(unsigned int pfifo_mask)
45{
46 mutex_lock(&pfifo_lock);
47 pfifo_used &= ~pfifo_mask;
48 mutex_unlock(&pfifo_lock);
49}
50
51EXPORT_SYMBOL(pfifo_push);
52EXPORT_SYMBOL(pfifo_pop);
53EXPORT_SYMBOL(pfifo_fill_level);
54EXPORT_SYMBOL(pfifo_empty);
55EXPORT_SYMBOL(pfifo_request);
56EXPORT_SYMBOL(pfifo_free);
diff --git a/arch/arm/mach-netx/time.c b/arch/arm/mach-netx/time.c
deleted file mode 100644
index d9defa1ab605..000000000000
--- a/arch/arm/mach-netx/time.c
+++ /dev/null
@@ -1,141 +0,0 @@
1// SPDX-License-Identifier: GPL-2.0-only
2/*
3 * arch/arm/mach-netx/time.c
4 *
5 * Copyright (c) 2005 Sascha Hauer <s.hauer@pengutronix.de>, Pengutronix
6 */
7
8#include <linux/init.h>
9#include <linux/interrupt.h>
10#include <linux/irq.h>
11#include <linux/clocksource.h>
12#include <linux/clockchips.h>
13#include <linux/io.h>
14
15#include <mach/hardware.h>
16#include <asm/mach/time.h>
17#include <mach/netx-regs.h>
18
19#define NETX_CLOCK_FREQ 100000000
20#define NETX_LATCH DIV_ROUND_CLOSEST(NETX_CLOCK_FREQ, HZ)
21
22#define TIMER_CLOCKEVENT 0
23#define TIMER_CLOCKSOURCE 1
24
25static inline void timer_shutdown(struct clock_event_device *evt)
26{
27 /* disable timer */
28 writel(0, NETX_GPIO_COUNTER_CTRL(TIMER_CLOCKEVENT));
29}
30
31static int netx_shutdown(struct clock_event_device *evt)
32{
33 timer_shutdown(evt);
34
35 return 0;
36}
37
38static int netx_set_oneshot(struct clock_event_device *evt)
39{
40 u32 tmode = NETX_GPIO_COUNTER_CTRL_IRQ_EN | NETX_GPIO_COUNTER_CTRL_RUN;
41
42 timer_shutdown(evt);
43 writel(0, NETX_GPIO_COUNTER_MAX(TIMER_CLOCKEVENT));
44 writel(tmode, NETX_GPIO_COUNTER_CTRL(TIMER_CLOCKEVENT));
45
46 return 0;
47}
48
49static int netx_set_periodic(struct clock_event_device *evt)
50{
51 u32 tmode = NETX_GPIO_COUNTER_CTRL_RST_EN |
52 NETX_GPIO_COUNTER_CTRL_IRQ_EN | NETX_GPIO_COUNTER_CTRL_RUN;
53
54 timer_shutdown(evt);
55 writel(NETX_LATCH, NETX_GPIO_COUNTER_MAX(TIMER_CLOCKEVENT));
56 writel(tmode, NETX_GPIO_COUNTER_CTRL(TIMER_CLOCKEVENT));
57
58 return 0;
59}
60
61static int netx_set_next_event(unsigned long evt,
62 struct clock_event_device *clk)
63{
64 writel(0 - evt, NETX_GPIO_COUNTER_CURRENT(TIMER_CLOCKEVENT));
65 return 0;
66}
67
68static struct clock_event_device netx_clockevent = {
69 .name = "netx-timer" __stringify(TIMER_CLOCKEVENT),
70 .features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT,
71 .set_next_event = netx_set_next_event,
72 .set_state_shutdown = netx_shutdown,
73 .set_state_periodic = netx_set_periodic,
74 .set_state_oneshot = netx_set_oneshot,
75 .tick_resume = netx_shutdown,
76};
77
78/*
79 * IRQ handler for the timer
80 */
81static irqreturn_t
82netx_timer_interrupt(int irq, void *dev_id)
83{
84 struct clock_event_device *evt = &netx_clockevent;
85
86 /* acknowledge interrupt */
87 writel(COUNTER_BIT(0), NETX_GPIO_IRQ);
88
89 evt->event_handler(evt);
90
91 return IRQ_HANDLED;
92}
93
94static struct irqaction netx_timer_irq = {
95 .name = "NetX Timer Tick",
96 .flags = IRQF_TIMER | IRQF_IRQPOLL,
97 .handler = netx_timer_interrupt,
98};
99
100/*
101 * Set up timer interrupt
102 */
103void __init netx_timer_init(void)
104{
105 /* disable timer initially */
106 writel(0, NETX_GPIO_COUNTER_CTRL(0));
107
108 /* Reset the timer value to zero */
109 writel(0, NETX_GPIO_COUNTER_CURRENT(0));
110
111 writel(NETX_LATCH, NETX_GPIO_COUNTER_MAX(0));
112
113 /* acknowledge interrupt */
114 writel(COUNTER_BIT(0), NETX_GPIO_IRQ);
115
116 /* Enable the interrupt in the specific timer
117 * register and start timer
118 */
119 writel(COUNTER_BIT(0), NETX_GPIO_IRQ_ENABLE);
120 writel(NETX_GPIO_COUNTER_CTRL_IRQ_EN | NETX_GPIO_COUNTER_CTRL_RUN,
121 NETX_GPIO_COUNTER_CTRL(0));
122
123 setup_irq(NETX_IRQ_TIMER0, &netx_timer_irq);
124
125 /* Setup timer one for clocksource */
126 writel(0, NETX_GPIO_COUNTER_CTRL(TIMER_CLOCKSOURCE));
127 writel(0, NETX_GPIO_COUNTER_CURRENT(TIMER_CLOCKSOURCE));
128 writel(0xffffffff, NETX_GPIO_COUNTER_MAX(TIMER_CLOCKSOURCE));
129
130 writel(NETX_GPIO_COUNTER_CTRL_RUN,
131 NETX_GPIO_COUNTER_CTRL(TIMER_CLOCKSOURCE));
132
133 clocksource_mmio_init(NETX_GPIO_COUNTER_CURRENT(TIMER_CLOCKSOURCE),
134 "netx_timer", NETX_CLOCK_FREQ, 200, 32, clocksource_mmio_readl_up);
135
136 /* with max_delta_ns >= delta2ns(0x800) the system currently runs fine.
137 * Adding some safety ... */
138 netx_clockevent.cpumask = cpumask_of(0);
139 clockevents_config_and_register(&netx_clockevent, NETX_CLOCK_FREQ,
140 0xa00, 0xfffffffe);
141}
diff --git a/arch/arm/mach-netx/xc.c b/arch/arm/mach-netx/xc.c
deleted file mode 100644
index 885a618b2651..000000000000
--- a/arch/arm/mach-netx/xc.c
+++ /dev/null
@@ -1,246 +0,0 @@
1// SPDX-License-Identifier: GPL-2.0-only
2/*
3 * arch/arm/mach-netx/xc.c
4 *
5 * Copyright (c) 2005 Sascha Hauer <s.hauer@pengutronix.de>, Pengutronix
6 */
7
8#include <linux/init.h>
9#include <linux/device.h>
10#include <linux/firmware.h>
11#include <linux/mutex.h>
12#include <linux/slab.h>
13#include <linux/io.h>
14#include <linux/export.h>
15
16#include <mach/hardware.h>
17#include <mach/irqs.h>
18#include <mach/netx-regs.h>
19
20#include <mach/xc.h>
21
22static DEFINE_MUTEX(xc_lock);
23
24static int xc_in_use = 0;
25
26struct fw_desc {
27 unsigned int ofs;
28 unsigned int size;
29 unsigned int patch_ofs;
30 unsigned int patch_entries;
31};
32
33struct fw_header {
34 unsigned int magic;
35 unsigned int type;
36 unsigned int version;
37 unsigned int reserved[5];
38 struct fw_desc fw_desc[3];
39} __attribute__ ((packed));
40
41int xc_stop(struct xc *x)
42{
43 writel(RPU_HOLD_PC, x->xmac_base + NETX_XMAC_RPU_HOLD_PC_OFS);
44 writel(TPU_HOLD_PC, x->xmac_base + NETX_XMAC_TPU_HOLD_PC_OFS);
45 writel(XPU_HOLD_PC, x->xpec_base + NETX_XPEC_XPU_HOLD_PC_OFS);
46 return 0;
47}
48
49int xc_start(struct xc *x)
50{
51 writel(0, x->xmac_base + NETX_XMAC_RPU_HOLD_PC_OFS);
52 writel(0, x->xmac_base + NETX_XMAC_TPU_HOLD_PC_OFS);
53 writel(0, x->xpec_base + NETX_XPEC_XPU_HOLD_PC_OFS);
54 return 0;
55}
56
57int xc_running(struct xc *x)
58{
59 return (readl(x->xmac_base + NETX_XMAC_RPU_HOLD_PC_OFS) & RPU_HOLD_PC)
60 || (readl(x->xmac_base + NETX_XMAC_TPU_HOLD_PC_OFS) & TPU_HOLD_PC)
61 || (readl(x->xpec_base + NETX_XPEC_XPU_HOLD_PC_OFS) & XPU_HOLD_PC) ?
62 0 : 1;
63}
64
65int xc_reset(struct xc *x)
66{
67 writel(0, x->xpec_base + NETX_XPEC_PC_OFS);
68 return 0;
69}
70
71static int xc_check_ptr(struct xc *x, unsigned long adr, unsigned int size)
72{
73 if (adr >= NETX_PA_XMAC(x->no) &&
74 adr + size < NETX_PA_XMAC(x->no) + XMAC_MEM_SIZE)
75 return 0;
76
77 if (adr >= NETX_PA_XPEC(x->no) &&
78 adr + size < NETX_PA_XPEC(x->no) + XPEC_MEM_SIZE)
79 return 0;
80
81 dev_err(x->dev, "Illegal pointer in firmware found. aborting\n");
82
83 return -1;
84}
85
86static int xc_patch(struct xc *x, const void *patch, int count)
87{
88 unsigned int val, adr;
89 const unsigned int *data = patch;
90
91 int i;
92 for (i = 0; i < count; i++) {
93 adr = *data++;
94 val = *data++;
95 if (xc_check_ptr(x, adr, 4) < 0)
96 return -EINVAL;
97
98 writel(val, (void __iomem *)io_p2v(adr));
99 }
100 return 0;
101}
102
103int xc_request_firmware(struct xc *x)
104{
105 int ret;
106 char name[16];
107 const struct firmware *fw;
108 struct fw_header *head;
109 unsigned int size;
110 int i;
111 const void *src;
112 unsigned long dst;
113
114 sprintf(name, "xc%d.bin", x->no);
115
116 ret = request_firmware(&fw, name, x->dev);
117
118 if (ret < 0) {
119 dev_err(x->dev, "request_firmware failed\n");
120 return ret;
121 }
122
123 head = (struct fw_header *)fw->data;
124 if (head->magic != 0x4e657458) {
125 if (head->magic == 0x5874654e) {
126 dev_err(x->dev,
127 "firmware magic is 'XteN'. Endianness problems?\n");
128 ret = -ENODEV;
129 goto exit_release_firmware;
130 }
131 dev_err(x->dev, "unrecognized firmware magic 0x%08x\n",
132 head->magic);
133 ret = -ENODEV;
134 goto exit_release_firmware;
135 }
136
137 x->type = head->type;
138 x->version = head->version;
139
140 ret = -EINVAL;
141
142 for (i = 0; i < 3; i++) {
143 src = fw->data + head->fw_desc[i].ofs;
144 dst = *(unsigned int *)src;
145 src += sizeof (unsigned int);
146 size = head->fw_desc[i].size - sizeof (unsigned int);
147
148 if (xc_check_ptr(x, dst, size))
149 goto exit_release_firmware;
150
151 memcpy((void *)io_p2v(dst), src, size);
152
153 src = fw->data + head->fw_desc[i].patch_ofs;
154 size = head->fw_desc[i].patch_entries;
155 ret = xc_patch(x, src, size);
156 if (ret < 0)
157 goto exit_release_firmware;
158 }
159
160 ret = 0;
161
162 exit_release_firmware:
163 release_firmware(fw);
164
165 return ret;
166}
167
168struct xc *request_xc(int xcno, struct device *dev)
169{
170 struct xc *x = NULL;
171
172 mutex_lock(&xc_lock);
173
174 if (xcno > 3)
175 goto exit;
176 if (xc_in_use & (1 << xcno))
177 goto exit;
178
179 x = kmalloc(sizeof (struct xc), GFP_KERNEL);
180 if (!x)
181 goto exit;
182
183 if (!request_mem_region
184 (NETX_PA_XPEC(xcno), XPEC_MEM_SIZE, kobject_name(&dev->kobj)))
185 goto exit_free;
186
187 if (!request_mem_region
188 (NETX_PA_XMAC(xcno), XMAC_MEM_SIZE, kobject_name(&dev->kobj)))
189 goto exit_release_1;
190
191 if (!request_mem_region
192 (SRAM_INTERNAL_PHYS(xcno), SRAM_MEM_SIZE, kobject_name(&dev->kobj)))
193 goto exit_release_2;
194
195 x->xpec_base = (void * __iomem)io_p2v(NETX_PA_XPEC(xcno));
196 x->xmac_base = (void * __iomem)io_p2v(NETX_PA_XMAC(xcno));
197 x->sram_base = ioremap(SRAM_INTERNAL_PHYS(xcno), SRAM_MEM_SIZE);
198 if (!x->sram_base)
199 goto exit_release_3;
200
201 x->irq = NETX_IRQ_XPEC(xcno);
202
203 x->no = xcno;
204 x->dev = dev;
205
206 xc_in_use |= (1 << xcno);
207
208 goto exit;
209
210 exit_release_3:
211 release_mem_region(SRAM_INTERNAL_PHYS(xcno), SRAM_MEM_SIZE);
212 exit_release_2:
213 release_mem_region(NETX_PA_XMAC(xcno), XMAC_MEM_SIZE);
214 exit_release_1:
215 release_mem_region(NETX_PA_XPEC(xcno), XPEC_MEM_SIZE);
216 exit_free:
217 kfree(x);
218 x = NULL;
219 exit:
220 mutex_unlock(&xc_lock);
221 return x;
222}
223
224void free_xc(struct xc *x)
225{
226 int xcno = x->no;
227
228 mutex_lock(&xc_lock);
229
230 iounmap(x->sram_base);
231 release_mem_region(SRAM_INTERNAL_PHYS(xcno), SRAM_MEM_SIZE);
232 release_mem_region(NETX_PA_XMAC(xcno), XMAC_MEM_SIZE);
233 release_mem_region(NETX_PA_XPEC(xcno), XPEC_MEM_SIZE);
234 xc_in_use &= ~(1 << x->no);
235 kfree(x);
236
237 mutex_unlock(&xc_lock);
238}
239
240EXPORT_SYMBOL(free_xc);
241EXPORT_SYMBOL(request_xc);
242EXPORT_SYMBOL(xc_request_firmware);
243EXPORT_SYMBOL(xc_reset);
244EXPORT_SYMBOL(xc_running);
245EXPORT_SYMBOL(xc_start);
246EXPORT_SYMBOL(xc_stop);
diff --git a/arch/arm/mach-omap1/ams-delta-fiq-handler.S b/arch/arm/mach-omap1/ams-delta-fiq-handler.S
index 81159af44862..14a6c3eb3298 100644
--- a/arch/arm/mach-omap1/ams-delta-fiq-handler.S
+++ b/arch/arm/mach-omap1/ams-delta-fiq-handler.S
@@ -126,6 +126,8 @@ restart:
126 orr r11, r11, r13 @ mask all requested interrupts 126 orr r11, r11, r13 @ mask all requested interrupts
127 str r11, [r12, #OMAP1510_GPIO_INT_MASK] 127 str r11, [r12, #OMAP1510_GPIO_INT_MASK]
128 128
129 str r13, [r12, #OMAP1510_GPIO_INT_STATUS] @ ack all requested interrupts
130
129 ands r10, r13, #KEYBRD_CLK_MASK @ extract keyboard status - set? 131 ands r10, r13, #KEYBRD_CLK_MASK @ extract keyboard status - set?
130 beq hksw @ no - try next source 132 beq hksw @ no - try next source
131 133
@@ -133,7 +135,6 @@ restart:
133 @@@@@@@@@@@@@@@@@@@@@@ 135 @@@@@@@@@@@@@@@@@@@@@@
134 @ Keyboard clock FIQ mode interrupt handler 136 @ Keyboard clock FIQ mode interrupt handler
135 @ r10 now contains KEYBRD_CLK_MASK, use it 137 @ r10 now contains KEYBRD_CLK_MASK, use it
136 str r10, [r12, #OMAP1510_GPIO_INT_STATUS] @ ack the interrupt
137 bic r11, r11, r10 @ unmask it 138 bic r11, r11, r10 @ unmask it
138 str r11, [r12, #OMAP1510_GPIO_INT_MASK] 139 str r11, [r12, #OMAP1510_GPIO_INT_MASK]
139 140
diff --git a/arch/arm/mach-omap1/ams-delta-fiq.c b/arch/arm/mach-omap1/ams-delta-fiq.c
index 43899fa56674..0254eb9cf8c6 100644
--- a/arch/arm/mach-omap1/ams-delta-fiq.c
+++ b/arch/arm/mach-omap1/ams-delta-fiq.c
@@ -70,9 +70,7 @@ static irqreturn_t deferred_fiq(int irq, void *dev_id)
70 * interrupts default to since commit 80ac93c27441 70 * interrupts default to since commit 80ac93c27441
71 * requires interrupt already acked and unmasked. 71 * requires interrupt already acked and unmasked.
72 */ 72 */
73 if (irq_chip->irq_ack) 73 if (!WARN_ON_ONCE(!irq_chip->irq_unmask))
74 irq_chip->irq_ack(d);
75 if (irq_chip->irq_unmask)
76 irq_chip->irq_unmask(d); 74 irq_chip->irq_unmask(d);
77 } 75 }
78 for (; irq_counter[gpio] < fiq_count; irq_counter[gpio]++) 76 for (; irq_counter[gpio] < fiq_count; irq_counter[gpio]++)
diff --git a/arch/arm/mach-omap2/omap4-common.c b/arch/arm/mach-omap2/omap4-common.c
index f9c02f9f1c92..5c3845730dbf 100644
--- a/arch/arm/mach-omap2/omap4-common.c
+++ b/arch/arm/mach-omap2/omap4-common.c
@@ -127,6 +127,9 @@ static int __init omap4_sram_init(void)
127 struct device_node *np; 127 struct device_node *np;
128 struct gen_pool *sram_pool; 128 struct gen_pool *sram_pool;
129 129
130 if (!soc_is_omap44xx() && !soc_is_omap54xx())
131 return 0;
132
130 np = of_find_compatible_node(NULL, NULL, "ti,omap4-mpu"); 133 np = of_find_compatible_node(NULL, NULL, "ti,omap4-mpu");
131 if (!np) 134 if (!np)
132 pr_warn("%s:Unable to allocate sram needed to handle errata I688\n", 135 pr_warn("%s:Unable to allocate sram needed to handle errata I688\n",
diff --git a/arch/arm/mach-omap2/omap_hwmod_7xx_data.c b/arch/arm/mach-omap2/omap_hwmod_7xx_data.c
index 4a5b4aee6615..1ec21e9ba1e9 100644
--- a/arch/arm/mach-omap2/omap_hwmod_7xx_data.c
+++ b/arch/arm/mach-omap2/omap_hwmod_7xx_data.c
@@ -379,7 +379,8 @@ static struct omap_hwmod dra7xx_dcan2_hwmod = {
379static struct omap_hwmod_class_sysconfig dra7xx_epwmss_sysc = { 379static struct omap_hwmod_class_sysconfig dra7xx_epwmss_sysc = {
380 .rev_offs = 0x0, 380 .rev_offs = 0x0,
381 .sysc_offs = 0x4, 381 .sysc_offs = 0x4,
382 .sysc_flags = SYSC_HAS_SIDLEMODE | SYSC_HAS_SOFTRESET, 382 .sysc_flags = SYSC_HAS_SIDLEMODE | SYSC_HAS_SOFTRESET |
383 SYSC_HAS_RESET_STATUS,
383 .idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART), 384 .idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART),
384 .sysc_fields = &omap_hwmod_sysc_type2, 385 .sysc_fields = &omap_hwmod_sysc_type2,
385}; 386};
diff --git a/arch/arm/mach-rpc/riscpc.c b/arch/arm/mach-rpc/riscpc.c
index 0ce56ad754ce..ea2c84214bac 100644
--- a/arch/arm/mach-rpc/riscpc.c
+++ b/arch/arm/mach-rpc/riscpc.c
@@ -46,6 +46,7 @@ static int __init parse_tag_acorn(const struct tag *tag)
46 switch (tag->u.acorn.vram_pages) { 46 switch (tag->u.acorn.vram_pages) {
47 case 512: 47 case 512:
48 vram_size += PAGE_SIZE * 256; 48 vram_size += PAGE_SIZE * 256;
49 /* Fall through - ??? */
49 case 256: 50 case 256:
50 vram_size += PAGE_SIZE * 256; 51 vram_size += PAGE_SIZE * 256;
51 default: 52 default:
diff --git a/arch/arm/mach-tegra/reset.c b/arch/arm/mach-tegra/reset.c
index 5a67a71f80cc..76a65df42d10 100644
--- a/arch/arm/mach-tegra/reset.c
+++ b/arch/arm/mach-tegra/reset.c
@@ -70,7 +70,7 @@ static void __init tegra_cpu_reset_handler_enable(void)
70 switch (err) { 70 switch (err) {
71 case -ENOSYS: 71 case -ENOSYS:
72 tegra_cpu_reset_handler_set(reset_address); 72 tegra_cpu_reset_handler_set(reset_address);
73 /* pass-through */ 73 /* fall through */
74 case 0: 74 case 0:
75 is_enabled = true; 75 is_enabled = true;
76 break; 76 break;
diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig
index 820b60a50125..c1222c0e9fd3 100644
--- a/arch/arm/mm/Kconfig
+++ b/arch/arm/mm/Kconfig
@@ -663,6 +663,7 @@ config ARM_LPAE
663 depends on MMU && CPU_32v7 && !CPU_32v6 && !CPU_32v5 && \ 663 depends on MMU && CPU_32v7 && !CPU_32v6 && !CPU_32v5 && \
664 !CPU_32v4 && !CPU_32v3 664 !CPU_32v4 && !CPU_32v3
665 select PHYS_ADDR_T_64BIT 665 select PHYS_ADDR_T_64BIT
666 select SWIOTLB
666 help 667 help
667 Say Y if you have an ARMv7 processor supporting the LPAE page 668 Say Y if you have an ARMv7 processor supporting the LPAE page
668 table format and you would like to access memory beyond the 669 table format and you would like to access memory beyond the
diff --git a/arch/arm/mm/alignment.c b/arch/arm/mm/alignment.c
index 8cdb78642e93..04b36436cbc0 100644
--- a/arch/arm/mm/alignment.c
+++ b/arch/arm/mm/alignment.c
@@ -695,7 +695,7 @@ thumb2arm(u16 tinstr)
695 return subset[(L<<1) | ((tinstr & (1<<8)) >> 8)] | 695 return subset[(L<<1) | ((tinstr & (1<<8)) >> 8)] |
696 (tinstr & 255); /* register_list */ 696 (tinstr & 255); /* register_list */
697 } 697 }
698 /* Else fall through for illegal instruction case */ 698 /* Else, fall through - for illegal instruction case */
699 699
700 default: 700 default:
701 return BAD_INSTR; 701 return BAD_INSTR;
@@ -751,6 +751,8 @@ do_alignment_t32_to_handler(unsigned long *pinstr, struct pt_regs *regs,
751 case 0xe8e0: 751 case 0xe8e0:
752 case 0xe9e0: 752 case 0xe9e0:
753 poffset->un = (tinst2 & 0xff) << 2; 753 poffset->un = (tinst2 & 0xff) << 2;
754 /* Fall through */
755
754 case 0xe940: 756 case 0xe940:
755 case 0xe9c0: 757 case 0xe9c0:
756 return do_alignment_ldrdstrd; 758 return do_alignment_ldrdstrd;
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index 4789c60a86e3..d42557ee69c2 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -15,6 +15,7 @@
15#include <linux/init.h> 15#include <linux/init.h>
16#include <linux/device.h> 16#include <linux/device.h>
17#include <linux/dma-mapping.h> 17#include <linux/dma-mapping.h>
18#include <linux/dma-noncoherent.h>
18#include <linux/dma-contiguous.h> 19#include <linux/dma-contiguous.h>
19#include <linux/highmem.h> 20#include <linux/highmem.h>
20#include <linux/memblock.h> 21#include <linux/memblock.h>
@@ -1125,6 +1126,19 @@ int arm_dma_supported(struct device *dev, u64 mask)
1125 1126
1126static const struct dma_map_ops *arm_get_dma_map_ops(bool coherent) 1127static const struct dma_map_ops *arm_get_dma_map_ops(bool coherent)
1127{ 1128{
1129 /*
1130 * When CONFIG_ARM_LPAE is set, physical address can extend above
1131 * 32-bits, which then can't be addressed by devices that only support
1132 * 32-bit DMA.
1133 * Use the generic dma-direct / swiotlb ops code in that case, as that
1134 * handles bounce buffering for us.
1135 *
1136 * Note: this checks CONFIG_ARM_LPAE instead of CONFIG_SWIOTLB as the
1137 * latter is also selected by the Xen code, but that code for now relies
1138 * on non-NULL dev_dma_ops. To be cleaned up later.
1139 */
1140 if (IS_ENABLED(CONFIG_ARM_LPAE))
1141 return NULL;
1128 return coherent ? &arm_coherent_dma_ops : &arm_dma_ops; 1142 return coherent ? &arm_coherent_dma_ops : &arm_dma_ops;
1129} 1143}
1130 1144
@@ -2329,6 +2343,9 @@ void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
2329 const struct dma_map_ops *dma_ops; 2343 const struct dma_map_ops *dma_ops;
2330 2344
2331 dev->archdata.dma_coherent = coherent; 2345 dev->archdata.dma_coherent = coherent;
2346#ifdef CONFIG_SWIOTLB
2347 dev->dma_coherent = coherent;
2348#endif
2332 2349
2333 /* 2350 /*
2334 * Don't override the dma_ops if they have already been set. Ideally 2351 * Don't override the dma_ops if they have already been set. Ideally
@@ -2363,3 +2380,45 @@ void arch_teardown_dma_ops(struct device *dev)
2363 /* Let arch_setup_dma_ops() start again from scratch upon re-probe */ 2380 /* Let arch_setup_dma_ops() start again from scratch upon re-probe */
2364 set_dma_ops(dev, NULL); 2381 set_dma_ops(dev, NULL);
2365} 2382}
2383
2384#ifdef CONFIG_SWIOTLB
2385void arch_sync_dma_for_device(struct device *dev, phys_addr_t paddr,
2386 size_t size, enum dma_data_direction dir)
2387{
2388 __dma_page_cpu_to_dev(phys_to_page(paddr), paddr & (PAGE_SIZE - 1),
2389 size, dir);
2390}
2391
2392void arch_sync_dma_for_cpu(struct device *dev, phys_addr_t paddr,
2393 size_t size, enum dma_data_direction dir)
2394{
2395 __dma_page_dev_to_cpu(phys_to_page(paddr), paddr & (PAGE_SIZE - 1),
2396 size, dir);
2397}
2398
2399long arch_dma_coherent_to_pfn(struct device *dev, void *cpu_addr,
2400 dma_addr_t dma_addr)
2401{
2402 return dma_to_pfn(dev, dma_addr);
2403}
2404
2405pgprot_t arch_dma_mmap_pgprot(struct device *dev, pgprot_t prot,
2406 unsigned long attrs)
2407{
2408 return __get_dma_pgprot(attrs, prot);
2409}
2410
2411void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
2412 gfp_t gfp, unsigned long attrs)
2413{
2414 return __dma_alloc(dev, size, dma_handle, gfp,
2415 __get_dma_pgprot(attrs, PAGE_KERNEL), false,
2416 attrs, __builtin_return_address(0));
2417}
2418
2419void arch_dma_free(struct device *dev, size_t size, void *cpu_addr,
2420 dma_addr_t dma_handle, unsigned long attrs)
2421{
2422 __arm_dma_free(dev, size, cpu_addr, dma_handle, attrs, false);
2423}
2424#endif /* CONFIG_SWIOTLB */
diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
index 4920a206dce9..b4be3baa83d4 100644
--- a/arch/arm/mm/init.c
+++ b/arch/arm/mm/init.c
@@ -21,6 +21,7 @@
21#include <linux/dma-contiguous.h> 21#include <linux/dma-contiguous.h>
22#include <linux/sizes.h> 22#include <linux/sizes.h>
23#include <linux/stop_machine.h> 23#include <linux/stop_machine.h>
24#include <linux/swiotlb.h>
24 25
25#include <asm/cp15.h> 26#include <asm/cp15.h>
26#include <asm/mach-types.h> 27#include <asm/mach-types.h>
@@ -174,6 +175,11 @@ static void __init zone_sizes_init(unsigned long min, unsigned long max_low,
174#ifdef CONFIG_HAVE_ARCH_PFN_VALID 175#ifdef CONFIG_HAVE_ARCH_PFN_VALID
175int pfn_valid(unsigned long pfn) 176int pfn_valid(unsigned long pfn)
176{ 177{
178 phys_addr_t addr = __pfn_to_phys(pfn);
179
180 if (__phys_to_pfn(addr) != pfn)
181 return 0;
182
177 return memblock_is_map_memory(__pfn_to_phys(pfn)); 183 return memblock_is_map_memory(__pfn_to_phys(pfn));
178} 184}
179EXPORT_SYMBOL(pfn_valid); 185EXPORT_SYMBOL(pfn_valid);
@@ -463,6 +469,10 @@ static void __init free_highpages(void)
463 */ 469 */
464void __init mem_init(void) 470void __init mem_init(void)
465{ 471{
472#ifdef CONFIG_ARM_LPAE
473 swiotlb_init(1);
474#endif
475
466 set_max_mapnr(pfn_to_page(max_pfn) - mem_map); 476 set_max_mapnr(pfn_to_page(max_pfn) - mem_map);
467 477
468 /* this will put all unused low memory onto the freelists */ 478 /* this will put all unused low memory onto the freelists */
@@ -623,7 +633,8 @@ static void update_sections_early(struct section_perm perms[], int n)
623 if (t->flags & PF_KTHREAD) 633 if (t->flags & PF_KTHREAD)
624 continue; 634 continue;
625 for_each_thread(t, s) 635 for_each_thread(t, s)
626 set_section_perms(perms, n, true, s->mm); 636 if (s->mm)
637 set_section_perms(perms, n, true, s->mm);
627 } 638 }
628 set_section_perms(perms, n, true, current->active_mm); 639 set_section_perms(perms, n, true, current->active_mm);
629 set_section_perms(perms, n, true, &init_mm); 640 set_section_perms(perms, n, true, &init_mm);
diff --git a/arch/arm/plat-omap/dma.c b/arch/arm/plat-omap/dma.c
index 79f43acf9acb..08c99413d02c 100644
--- a/arch/arm/plat-omap/dma.c
+++ b/arch/arm/plat-omap/dma.c
@@ -388,17 +388,15 @@ void omap_set_dma_src_burst_mode(int lch, enum omap_dma_burst_mode burst_mode)
388 /* 388 /*
389 * not supported by current hardware on OMAP1 389 * not supported by current hardware on OMAP1
390 * w |= (0x03 << 7); 390 * w |= (0x03 << 7);
391 * fall through
392 */ 391 */
392 /* fall through */
393 case OMAP_DMA_DATA_BURST_16: 393 case OMAP_DMA_DATA_BURST_16:
394 if (dma_omap2plus()) { 394 if (dma_omap2plus()) {
395 burst = 0x3; 395 burst = 0x3;
396 break; 396 break;
397 } 397 }
398 /* 398 /* OMAP1 don't support burst 16 */
399 * OMAP1 don't support burst 16 399 /* fall through */
400 * fall through
401 */
402 default: 400 default:
403 BUG(); 401 BUG();
404 } 402 }
@@ -474,10 +472,8 @@ void omap_set_dma_dest_burst_mode(int lch, enum omap_dma_burst_mode burst_mode)
474 burst = 0x3; 472 burst = 0x3;
475 break; 473 break;
476 } 474 }
477 /* 475 /* OMAP1 don't support burst 16 */
478 * OMAP1 don't support burst 16 476 /* fall through */
479 * fall through
480 */
481 default: 477 default:
482 printk(KERN_ERR "Invalid DMA burst mode\n"); 478 printk(KERN_ERR "Invalid DMA burst mode\n");
483 BUG(); 479 BUG();
diff --git a/arch/arm64/Makefile b/arch/arm64/Makefile
index bb1f1dbb34e8..61de992bbea3 100644
--- a/arch/arm64/Makefile
+++ b/arch/arm64/Makefile
@@ -52,7 +52,7 @@ ifeq ($(CONFIG_GENERIC_COMPAT_VDSO), y)
52 52
53 ifeq ($(CONFIG_CC_IS_CLANG), y) 53 ifeq ($(CONFIG_CC_IS_CLANG), y)
54 $(warning CROSS_COMPILE_COMPAT is clang, the compat vDSO will not be built) 54 $(warning CROSS_COMPILE_COMPAT is clang, the compat vDSO will not be built)
55 else ifeq ($(CROSS_COMPILE_COMPAT),) 55 else ifeq ($(strip $(CROSS_COMPILE_COMPAT)),)
56 $(warning CROSS_COMPILE_COMPAT not defined or empty, the compat vDSO will not be built) 56 $(warning CROSS_COMPILE_COMPAT not defined or empty, the compat vDSO will not be built)
57 else ifeq ($(shell which $(CROSS_COMPILE_COMPAT)gcc 2> /dev/null),) 57 else ifeq ($(shell which $(CROSS_COMPILE_COMPAT)gcc 2> /dev/null),)
58 $(error $(CROSS_COMPILE_COMPAT)gcc not found, check CROSS_COMPILE_COMPAT) 58 $(error $(CROSS_COMPILE_COMPAT)gcc not found, check CROSS_COMPILE_COMPAT)
diff --git a/arch/arm64/boot/dts/amlogic/meson-g12a-sei510.dts b/arch/arm64/boot/dts/amlogic/meson-g12a-sei510.dts
index c7a87368850b..12aa7eaeaf68 100644
--- a/arch/arm64/boot/dts/amlogic/meson-g12a-sei510.dts
+++ b/arch/arm64/boot/dts/amlogic/meson-g12a-sei510.dts
@@ -339,6 +339,12 @@
339 pinctrl-names = "default"; 339 pinctrl-names = "default";
340}; 340};
341 341
342&ir {
343 status = "okay";
344 pinctrl-0 = <&remote_input_ao_pins>;
345 pinctrl-names = "default";
346};
347
342&pwm_ef { 348&pwm_ef {
343 status = "okay"; 349 status = "okay";
344 pinctrl-0 = <&pwm_e_pins>; 350 pinctrl-0 = <&pwm_e_pins>;
diff --git a/arch/arm64/boot/dts/amlogic/meson-g12a.dtsi b/arch/arm64/boot/dts/amlogic/meson-g12a.dtsi
index f8d43e3dcf20..1785552d450c 100644
--- a/arch/arm64/boot/dts/amlogic/meson-g12a.dtsi
+++ b/arch/arm64/boot/dts/amlogic/meson-g12a.dtsi
@@ -2386,6 +2386,7 @@
2386 clocks = <&clkc CLKID_USB1_DDR_BRIDGE>; 2386 clocks = <&clkc CLKID_USB1_DDR_BRIDGE>;
2387 clock-names = "ddr"; 2387 clock-names = "ddr";
2388 phys = <&usb2_phy1>; 2388 phys = <&usb2_phy1>;
2389 phy-names = "usb2-phy";
2389 dr_mode = "peripheral"; 2390 dr_mode = "peripheral";
2390 g-rx-fifo-size = <192>; 2391 g-rx-fifo-size = <192>;
2391 g-np-tx-fifo-size = <128>; 2392 g-np-tx-fifo-size = <128>;
diff --git a/arch/arm64/boot/dts/amlogic/meson-g12b-odroid-n2.dts b/arch/arm64/boot/dts/amlogic/meson-g12b-odroid-n2.dts
index 81780ffcc7f0..4e916e1f71f7 100644
--- a/arch/arm64/boot/dts/amlogic/meson-g12b-odroid-n2.dts
+++ b/arch/arm64/boot/dts/amlogic/meson-g12b-odroid-n2.dts
@@ -53,6 +53,7 @@
53 53
54 gpio = <&gpio_ao GPIOAO_8 GPIO_ACTIVE_HIGH>; 54 gpio = <&gpio_ao GPIOAO_8 GPIO_ACTIVE_HIGH>;
55 enable-active-high; 55 enable-active-high;
56 regulator-always-on;
56 }; 57 };
57 58
58 tf_io: gpio-regulator-tf_io { 59 tf_io: gpio-regulator-tf_io {
diff --git a/arch/arm64/boot/dts/freescale/imx8mm-pinfunc.h b/arch/arm64/boot/dts/freescale/imx8mm-pinfunc.h
index e25f7fcd7997..cffa8991880d 100644
--- a/arch/arm64/boot/dts/freescale/imx8mm-pinfunc.h
+++ b/arch/arm64/boot/dts/freescale/imx8mm-pinfunc.h
@@ -462,7 +462,7 @@
462#define MX8MM_IOMUXC_SAI3_RXFS_GPIO4_IO28 0x1CC 0x434 0x000 0x5 0x0 462#define MX8MM_IOMUXC_SAI3_RXFS_GPIO4_IO28 0x1CC 0x434 0x000 0x5 0x0
463#define MX8MM_IOMUXC_SAI3_RXFS_TPSMP_HTRANS0 0x1CC 0x434 0x000 0x7 0x0 463#define MX8MM_IOMUXC_SAI3_RXFS_TPSMP_HTRANS0 0x1CC 0x434 0x000 0x7 0x0
464#define MX8MM_IOMUXC_SAI3_RXC_SAI3_RX_BCLK 0x1D0 0x438 0x000 0x0 0x0 464#define MX8MM_IOMUXC_SAI3_RXC_SAI3_RX_BCLK 0x1D0 0x438 0x000 0x0 0x0
465#define MX8MM_IOMUXC_SAI3_RXC_GPT1_CAPTURE2 0x1D0 0x438 0x000 0x1 0x0 465#define MX8MM_IOMUXC_SAI3_RXC_GPT1_CLK 0x1D0 0x438 0x000 0x1 0x0
466#define MX8MM_IOMUXC_SAI3_RXC_SAI5_RX_BCLK 0x1D0 0x438 0x4D0 0x2 0x2 466#define MX8MM_IOMUXC_SAI3_RXC_SAI5_RX_BCLK 0x1D0 0x438 0x4D0 0x2 0x2
467#define MX8MM_IOMUXC_SAI3_RXC_GPIO4_IO29 0x1D0 0x438 0x000 0x5 0x0 467#define MX8MM_IOMUXC_SAI3_RXC_GPIO4_IO29 0x1D0 0x438 0x000 0x5 0x0
468#define MX8MM_IOMUXC_SAI3_RXC_TPSMP_HTRANS1 0x1D0 0x438 0x000 0x7 0x0 468#define MX8MM_IOMUXC_SAI3_RXC_TPSMP_HTRANS1 0x1D0 0x438 0x000 0x7 0x0
@@ -472,7 +472,7 @@
472#define MX8MM_IOMUXC_SAI3_RXD_GPIO4_IO30 0x1D4 0x43C 0x000 0x5 0x0 472#define MX8MM_IOMUXC_SAI3_RXD_GPIO4_IO30 0x1D4 0x43C 0x000 0x5 0x0
473#define MX8MM_IOMUXC_SAI3_RXD_TPSMP_HDATA0 0x1D4 0x43C 0x000 0x7 0x0 473#define MX8MM_IOMUXC_SAI3_RXD_TPSMP_HDATA0 0x1D4 0x43C 0x000 0x7 0x0
474#define MX8MM_IOMUXC_SAI3_TXFS_SAI3_TX_SYNC 0x1D8 0x440 0x000 0x0 0x0 474#define MX8MM_IOMUXC_SAI3_TXFS_SAI3_TX_SYNC 0x1D8 0x440 0x000 0x0 0x0
475#define MX8MM_IOMUXC_SAI3_TXFS_GPT1_CLK 0x1D8 0x440 0x000 0x1 0x0 475#define MX8MM_IOMUXC_SAI3_TXFS_GPT1_CAPTURE2 0x1D8 0x440 0x000 0x1 0x0
476#define MX8MM_IOMUXC_SAI3_TXFS_SAI5_RX_DATA1 0x1D8 0x440 0x4D8 0x2 0x2 476#define MX8MM_IOMUXC_SAI3_TXFS_SAI5_RX_DATA1 0x1D8 0x440 0x4D8 0x2 0x2
477#define MX8MM_IOMUXC_SAI3_TXFS_GPIO4_IO31 0x1D8 0x440 0x000 0x5 0x0 477#define MX8MM_IOMUXC_SAI3_TXFS_GPIO4_IO31 0x1D8 0x440 0x000 0x5 0x0
478#define MX8MM_IOMUXC_SAI3_TXFS_TPSMP_HDATA1 0x1D8 0x440 0x000 0x7 0x0 478#define MX8MM_IOMUXC_SAI3_TXFS_TPSMP_HDATA1 0x1D8 0x440 0x000 0x7 0x0
diff --git a/arch/arm64/boot/dts/freescale/imx8mq.dtsi b/arch/arm64/boot/dts/freescale/imx8mq.dtsi
index d09b808eff87..52aae341d0da 100644
--- a/arch/arm64/boot/dts/freescale/imx8mq.dtsi
+++ b/arch/arm64/boot/dts/freescale/imx8mq.dtsi
@@ -715,8 +715,7 @@
715 715
716 sai2: sai@308b0000 { 716 sai2: sai@308b0000 {
717 #sound-dai-cells = <0>; 717 #sound-dai-cells = <0>;
718 compatible = "fsl,imx8mq-sai", 718 compatible = "fsl,imx8mq-sai";
719 "fsl,imx6sx-sai";
720 reg = <0x308b0000 0x10000>; 719 reg = <0x308b0000 0x10000>;
721 interrupts = <GIC_SPI 96 IRQ_TYPE_LEVEL_HIGH>; 720 interrupts = <GIC_SPI 96 IRQ_TYPE_LEVEL_HIGH>;
722 clocks = <&clk IMX8MQ_CLK_SAI2_IPG>, 721 clocks = <&clk IMX8MQ_CLK_SAI2_IPG>,
diff --git a/arch/arm64/include/asm/arch_gicv3.h b/arch/arm64/include/asm/arch_gicv3.h
index 79155a8cfe7c..89e4c8b79349 100644
--- a/arch/arm64/include/asm/arch_gicv3.h
+++ b/arch/arm64/include/asm/arch_gicv3.h
@@ -155,6 +155,12 @@ static inline void gic_pmr_mask_irqs(void)
155 BUILD_BUG_ON(GICD_INT_DEF_PRI < (GIC_PRIO_IRQOFF | 155 BUILD_BUG_ON(GICD_INT_DEF_PRI < (GIC_PRIO_IRQOFF |
156 GIC_PRIO_PSR_I_SET)); 156 GIC_PRIO_PSR_I_SET));
157 BUILD_BUG_ON(GICD_INT_DEF_PRI >= GIC_PRIO_IRQON); 157 BUILD_BUG_ON(GICD_INT_DEF_PRI >= GIC_PRIO_IRQON);
158 /*
159 * Need to make sure IRQON allows IRQs when SCR_EL3.FIQ is cleared
160 * and non-secure PMR accesses are not subject to the shifts that
161 * are applied to IRQ priorities
162 */
163 BUILD_BUG_ON((0x80 | (GICD_INT_DEF_PRI >> 1)) >= GIC_PRIO_IRQON);
158 gic_write_pmr(GIC_PRIO_IRQOFF); 164 gic_write_pmr(GIC_PRIO_IRQOFF);
159} 165}
160 166
diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h
index 407e2bf23676..c96ffa4722d3 100644
--- a/arch/arm64/include/asm/cpufeature.h
+++ b/arch/arm64/include/asm/cpufeature.h
@@ -35,9 +35,10 @@
35 */ 35 */
36 36
37enum ftr_type { 37enum ftr_type {
38 FTR_EXACT, /* Use a predefined safe value */ 38 FTR_EXACT, /* Use a predefined safe value */
39 FTR_LOWER_SAFE, /* Smaller value is safe */ 39 FTR_LOWER_SAFE, /* Smaller value is safe */
40 FTR_HIGHER_SAFE,/* Bigger value is safe */ 40 FTR_HIGHER_SAFE, /* Bigger value is safe */
41 FTR_HIGHER_OR_ZERO_SAFE, /* Bigger value is safe, but 0 is biggest */
41}; 42};
42 43
43#define FTR_STRICT true /* SANITY check strict matching required */ 44#define FTR_STRICT true /* SANITY check strict matching required */
diff --git a/arch/arm64/include/asm/daifflags.h b/arch/arm64/include/asm/daifflags.h
index 987926ed535e..063c964af705 100644
--- a/arch/arm64/include/asm/daifflags.h
+++ b/arch/arm64/include/asm/daifflags.h
@@ -13,6 +13,8 @@
13#define DAIF_PROCCTX 0 13#define DAIF_PROCCTX 0
14#define DAIF_PROCCTX_NOIRQ PSR_I_BIT 14#define DAIF_PROCCTX_NOIRQ PSR_I_BIT
15#define DAIF_ERRCTX (PSR_I_BIT | PSR_A_BIT) 15#define DAIF_ERRCTX (PSR_I_BIT | PSR_A_BIT)
16#define DAIF_MASK (PSR_D_BIT | PSR_A_BIT | PSR_I_BIT | PSR_F_BIT)
17
16 18
17/* mask/save/unmask/restore all exceptions, including interrupts. */ 19/* mask/save/unmask/restore all exceptions, including interrupts. */
18static inline void local_daif_mask(void) 20static inline void local_daif_mask(void)
diff --git a/arch/arm64/include/asm/efi.h b/arch/arm64/include/asm/efi.h
index 8e79ce9c3f5c..76a144702586 100644
--- a/arch/arm64/include/asm/efi.h
+++ b/arch/arm64/include/asm/efi.h
@@ -105,7 +105,11 @@ static inline unsigned long efi_get_max_initrd_addr(unsigned long dram_base,
105 ((protocol##_t *)instance)->f(instance, ##__VA_ARGS__) 105 ((protocol##_t *)instance)->f(instance, ##__VA_ARGS__)
106 106
107#define alloc_screen_info(x...) &screen_info 107#define alloc_screen_info(x...) &screen_info
108#define free_screen_info(x...) 108
109static inline void free_screen_info(efi_system_table_t *sys_table_arg,
110 struct screen_info *si)
111{
112}
109 113
110/* redeclare as 'hidden' so the compiler will generate relative references */ 114/* redeclare as 'hidden' so the compiler will generate relative references */
111extern struct screen_info screen_info __attribute__((__visibility__("hidden"))); 115extern struct screen_info screen_info __attribute__((__visibility__("hidden")));
diff --git a/arch/arm64/include/asm/elf.h b/arch/arm64/include/asm/elf.h
index 3c7037c6ba9b..b618017205a3 100644
--- a/arch/arm64/include/asm/elf.h
+++ b/arch/arm64/include/asm/elf.h
@@ -202,7 +202,7 @@ typedef compat_elf_greg_t compat_elf_gregset_t[COMPAT_ELF_NGREG];
202({ \ 202({ \
203 set_thread_flag(TIF_32BIT); \ 203 set_thread_flag(TIF_32BIT); \
204 }) 204 })
205#ifdef CONFIG_GENERIC_COMPAT_VDSO 205#ifdef CONFIG_COMPAT_VDSO
206#define COMPAT_ARCH_DLINFO \ 206#define COMPAT_ARCH_DLINFO \
207do { \ 207do { \
208 /* \ 208 /* \
diff --git a/arch/arm64/include/asm/kvm_arm.h b/arch/arm64/include/asm/kvm_arm.h
index a8b205e5c4a8..ddf9d762ac62 100644
--- a/arch/arm64/include/asm/kvm_arm.h
+++ b/arch/arm64/include/asm/kvm_arm.h
@@ -316,9 +316,10 @@
316 316
317#define kvm_arm_exception_class \ 317#define kvm_arm_exception_class \
318 ECN(UNKNOWN), ECN(WFx), ECN(CP15_32), ECN(CP15_64), ECN(CP14_MR), \ 318 ECN(UNKNOWN), ECN(WFx), ECN(CP15_32), ECN(CP15_64), ECN(CP14_MR), \
319 ECN(CP14_LS), ECN(FP_ASIMD), ECN(CP10_ID), ECN(CP14_64), ECN(SVC64), \ 319 ECN(CP14_LS), ECN(FP_ASIMD), ECN(CP10_ID), ECN(PAC), ECN(CP14_64), \
320 ECN(HVC64), ECN(SMC64), ECN(SYS64), ECN(IMP_DEF), ECN(IABT_LOW), \ 320 ECN(SVC64), ECN(HVC64), ECN(SMC64), ECN(SYS64), ECN(SVE), \
321 ECN(IABT_CUR), ECN(PC_ALIGN), ECN(DABT_LOW), ECN(DABT_CUR), \ 321 ECN(IMP_DEF), ECN(IABT_LOW), ECN(IABT_CUR), \
322 ECN(PC_ALIGN), ECN(DABT_LOW), ECN(DABT_CUR), \
322 ECN(SP_ALIGN), ECN(FP_EXC32), ECN(FP_EXC64), ECN(SERROR), \ 323 ECN(SP_ALIGN), ECN(FP_EXC32), ECN(FP_EXC64), ECN(SERROR), \
323 ECN(BREAKPT_LOW), ECN(BREAKPT_CUR), ECN(SOFTSTP_LOW), \ 324 ECN(BREAKPT_LOW), ECN(BREAKPT_CUR), ECN(SOFTSTP_LOW), \
324 ECN(SOFTSTP_CUR), ECN(WATCHPT_LOW), ECN(WATCHPT_CUR), \ 325 ECN(SOFTSTP_CUR), ECN(WATCHPT_LOW), ECN(WATCHPT_CUR), \
diff --git a/arch/arm64/include/asm/memory.h b/arch/arm64/include/asm/memory.h
index b7ba75809751..fb04f10a78ab 100644
--- a/arch/arm64/include/asm/memory.h
+++ b/arch/arm64/include/asm/memory.h
@@ -210,7 +210,11 @@ extern u64 vabits_user;
210#define __tag_reset(addr) untagged_addr(addr) 210#define __tag_reset(addr) untagged_addr(addr)
211#define __tag_get(addr) (__u8)((u64)(addr) >> 56) 211#define __tag_get(addr) (__u8)((u64)(addr) >> 56)
212#else 212#else
213#define __tag_set(addr, tag) (addr) 213static inline const void *__tag_set(const void *addr, u8 tag)
214{
215 return addr;
216}
217
214#define __tag_reset(addr) (addr) 218#define __tag_reset(addr) (addr)
215#define __tag_get(addr) 0 219#define __tag_get(addr) 0
216#endif 220#endif
@@ -301,8 +305,8 @@ static inline void *phys_to_virt(phys_addr_t x)
301#define page_to_virt(page) ({ \ 305#define page_to_virt(page) ({ \
302 unsigned long __addr = \ 306 unsigned long __addr = \
303 ((__page_to_voff(page)) | PAGE_OFFSET); \ 307 ((__page_to_voff(page)) | PAGE_OFFSET); \
304 unsigned long __addr_tag = \ 308 const void *__addr_tag = \
305 __tag_set(__addr, page_kasan_tag(page)); \ 309 __tag_set((void *)__addr, page_kasan_tag(page)); \
306 ((void *)__addr_tag); \ 310 ((void *)__addr_tag); \
307}) 311})
308 312
diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
index 87a4b2ddc1a1..e09760ece844 100644
--- a/arch/arm64/include/asm/pgtable.h
+++ b/arch/arm64/include/asm/pgtable.h
@@ -209,7 +209,7 @@ static inline pmd_t pmd_mkcont(pmd_t pmd)
209 209
210static inline pte_t pte_mkdevmap(pte_t pte) 210static inline pte_t pte_mkdevmap(pte_t pte)
211{ 211{
212 return set_pte_bit(pte, __pgprot(PTE_DEVMAP)); 212 return set_pte_bit(pte, __pgprot(PTE_DEVMAP | PTE_SPECIAL));
213} 213}
214 214
215static inline void set_pte(pte_t *ptep, pte_t pte) 215static inline void set_pte(pte_t *ptep, pte_t pte)
@@ -301,7 +301,6 @@ static inline int pte_same(pte_t pte_a, pte_t pte_b)
301/* 301/*
302 * Huge pte definitions. 302 * Huge pte definitions.
303 */ 303 */
304#define pte_huge(pte) (!(pte_val(pte) & PTE_TABLE_BIT))
305#define pte_mkhuge(pte) (__pte(pte_val(pte) & ~PTE_TABLE_BIT)) 304#define pte_mkhuge(pte) (__pte(pte_val(pte) & ~PTE_TABLE_BIT))
306 305
307/* 306/*
@@ -397,7 +396,10 @@ static inline int pmd_protnone(pmd_t pmd)
397#ifdef CONFIG_TRANSPARENT_HUGEPAGE 396#ifdef CONFIG_TRANSPARENT_HUGEPAGE
398#define pmd_devmap(pmd) pte_devmap(pmd_pte(pmd)) 397#define pmd_devmap(pmd) pte_devmap(pmd_pte(pmd))
399#endif 398#endif
400#define pmd_mkdevmap(pmd) pte_pmd(pte_mkdevmap(pmd_pte(pmd))) 399static inline pmd_t pmd_mkdevmap(pmd_t pmd)
400{
401 return pte_pmd(set_pte_bit(pmd_pte(pmd), __pgprot(PTE_DEVMAP)));
402}
401 403
402#define __pmd_to_phys(pmd) __pte_to_phys(pmd_pte(pmd)) 404#define __pmd_to_phys(pmd) __pte_to_phys(pmd_pte(pmd))
403#define __phys_to_pmd_val(phys) __phys_to_pte_val(phys) 405#define __phys_to_pmd_val(phys) __phys_to_pte_val(phys)
@@ -448,8 +450,8 @@ extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
448 PMD_TYPE_SECT) 450 PMD_TYPE_SECT)
449 451
450#if defined(CONFIG_ARM64_64K_PAGES) || CONFIG_PGTABLE_LEVELS < 3 452#if defined(CONFIG_ARM64_64K_PAGES) || CONFIG_PGTABLE_LEVELS < 3
451#define pud_sect(pud) (0) 453static inline bool pud_sect(pud_t pud) { return false; }
452#define pud_table(pud) (1) 454static inline bool pud_table(pud_t pud) { return true; }
453#else 455#else
454#define pud_sect(pud) ((pud_val(pud) & PUD_TYPE_MASK) == \ 456#define pud_sect(pud) ((pud_val(pud) & PUD_TYPE_MASK) == \
455 PUD_TYPE_SECT) 457 PUD_TYPE_SECT)
diff --git a/arch/arm64/include/asm/processor.h b/arch/arm64/include/asm/processor.h
index fd5b1a4efc70..844e2964b0f5 100644
--- a/arch/arm64/include/asm/processor.h
+++ b/arch/arm64/include/asm/processor.h
@@ -193,6 +193,16 @@ static inline void start_thread_common(struct pt_regs *regs, unsigned long pc)
193 regs->pmr_save = GIC_PRIO_IRQON; 193 regs->pmr_save = GIC_PRIO_IRQON;
194} 194}
195 195
196static inline void set_ssbs_bit(struct pt_regs *regs)
197{
198 regs->pstate |= PSR_SSBS_BIT;
199}
200
201static inline void set_compat_ssbs_bit(struct pt_regs *regs)
202{
203 regs->pstate |= PSR_AA32_SSBS_BIT;
204}
205
196static inline void start_thread(struct pt_regs *regs, unsigned long pc, 206static inline void start_thread(struct pt_regs *regs, unsigned long pc,
197 unsigned long sp) 207 unsigned long sp)
198{ 208{
@@ -200,7 +210,7 @@ static inline void start_thread(struct pt_regs *regs, unsigned long pc,
200 regs->pstate = PSR_MODE_EL0t; 210 regs->pstate = PSR_MODE_EL0t;
201 211
202 if (arm64_get_ssbd_state() != ARM64_SSBD_FORCE_ENABLE) 212 if (arm64_get_ssbd_state() != ARM64_SSBD_FORCE_ENABLE)
203 regs->pstate |= PSR_SSBS_BIT; 213 set_ssbs_bit(regs);
204 214
205 regs->sp = sp; 215 regs->sp = sp;
206} 216}
@@ -219,7 +229,7 @@ static inline void compat_start_thread(struct pt_regs *regs, unsigned long pc,
219#endif 229#endif
220 230
221 if (arm64_get_ssbd_state() != ARM64_SSBD_FORCE_ENABLE) 231 if (arm64_get_ssbd_state() != ARM64_SSBD_FORCE_ENABLE)
222 regs->pstate |= PSR_AA32_SSBS_BIT; 232 set_compat_ssbs_bit(regs);
223 233
224 regs->compat_sp = sp; 234 regs->compat_sp = sp;
225} 235}
diff --git a/arch/arm64/include/asm/ptrace.h b/arch/arm64/include/asm/ptrace.h
index b1dd039023ef..1dcf63a9ac1f 100644
--- a/arch/arm64/include/asm/ptrace.h
+++ b/arch/arm64/include/asm/ptrace.h
@@ -30,7 +30,7 @@
30 * in the the priority mask, it indicates that PSR.I should be set and 30 * in the the priority mask, it indicates that PSR.I should be set and
31 * interrupt disabling temporarily does not rely on IRQ priorities. 31 * interrupt disabling temporarily does not rely on IRQ priorities.
32 */ 32 */
33#define GIC_PRIO_IRQON 0xc0 33#define GIC_PRIO_IRQON 0xe0
34#define GIC_PRIO_IRQOFF (GIC_PRIO_IRQON & ~0x80) 34#define GIC_PRIO_IRQOFF (GIC_PRIO_IRQON & ~0x80)
35#define GIC_PRIO_PSR_I_SET (1 << 4) 35#define GIC_PRIO_PSR_I_SET (1 << 4)
36 36
diff --git a/arch/arm64/include/asm/stacktrace.h b/arch/arm64/include/asm/stacktrace.h
index df45af931459..4d9b1f48dc39 100644
--- a/arch/arm64/include/asm/stacktrace.h
+++ b/arch/arm64/include/asm/stacktrace.h
@@ -8,19 +8,12 @@
8#include <linux/percpu.h> 8#include <linux/percpu.h>
9#include <linux/sched.h> 9#include <linux/sched.h>
10#include <linux/sched/task_stack.h> 10#include <linux/sched/task_stack.h>
11#include <linux/types.h>
11 12
12#include <asm/memory.h> 13#include <asm/memory.h>
13#include <asm/ptrace.h> 14#include <asm/ptrace.h>
14#include <asm/sdei.h> 15#include <asm/sdei.h>
15 16
16struct stackframe {
17 unsigned long fp;
18 unsigned long pc;
19#ifdef CONFIG_FUNCTION_GRAPH_TRACER
20 int graph;
21#endif
22};
23
24enum stack_type { 17enum stack_type {
25 STACK_TYPE_UNKNOWN, 18 STACK_TYPE_UNKNOWN,
26 STACK_TYPE_TASK, 19 STACK_TYPE_TASK,
@@ -28,6 +21,7 @@ enum stack_type {
28 STACK_TYPE_OVERFLOW, 21 STACK_TYPE_OVERFLOW,
29 STACK_TYPE_SDEI_NORMAL, 22 STACK_TYPE_SDEI_NORMAL,
30 STACK_TYPE_SDEI_CRITICAL, 23 STACK_TYPE_SDEI_CRITICAL,
24 __NR_STACK_TYPES
31}; 25};
32 26
33struct stack_info { 27struct stack_info {
@@ -36,6 +30,37 @@ struct stack_info {
36 enum stack_type type; 30 enum stack_type type;
37}; 31};
38 32
33/*
34 * A snapshot of a frame record or fp/lr register values, along with some
35 * accounting information necessary for robust unwinding.
36 *
37 * @fp: The fp value in the frame record (or the real fp)
38 * @pc: The fp value in the frame record (or the real lr)
39 *
40 * @stacks_done: Stacks which have been entirely unwound, for which it is no
41 * longer valid to unwind to.
42 *
43 * @prev_fp: The fp that pointed to this frame record, or a synthetic value
44 * of 0. This is used to ensure that within a stack, each
45 * subsequent frame record is at an increasing address.
46 * @prev_type: The type of stack this frame record was on, or a synthetic
47 * value of STACK_TYPE_UNKNOWN. This is used to detect a
48 * transition from one stack to another.
49 *
50 * @graph: When FUNCTION_GRAPH_TRACER is selected, holds the index of a
51 * replacement lr value in the ftrace graph stack.
52 */
53struct stackframe {
54 unsigned long fp;
55 unsigned long pc;
56 DECLARE_BITMAP(stacks_done, __NR_STACK_TYPES);
57 unsigned long prev_fp;
58 enum stack_type prev_type;
59#ifdef CONFIG_FUNCTION_GRAPH_TRACER
60 int graph;
61#endif
62};
63
39extern int unwind_frame(struct task_struct *tsk, struct stackframe *frame); 64extern int unwind_frame(struct task_struct *tsk, struct stackframe *frame);
40extern void walk_stackframe(struct task_struct *tsk, struct stackframe *frame, 65extern void walk_stackframe(struct task_struct *tsk, struct stackframe *frame,
41 int (*fn)(struct stackframe *, void *), void *data); 66 int (*fn)(struct stackframe *, void *), void *data);
@@ -64,8 +89,9 @@ static inline bool on_irq_stack(unsigned long sp,
64 return true; 89 return true;
65} 90}
66 91
67static inline bool on_task_stack(struct task_struct *tsk, unsigned long sp, 92static inline bool on_task_stack(const struct task_struct *tsk,
68 struct stack_info *info) 93 unsigned long sp,
94 struct stack_info *info)
69{ 95{
70 unsigned long low = (unsigned long)task_stack_page(tsk); 96 unsigned long low = (unsigned long)task_stack_page(tsk);
71 unsigned long high = low + THREAD_SIZE; 97 unsigned long high = low + THREAD_SIZE;
@@ -112,10 +138,13 @@ static inline bool on_overflow_stack(unsigned long sp,
112 * We can only safely access per-cpu stacks from current in a non-preemptible 138 * We can only safely access per-cpu stacks from current in a non-preemptible
113 * context. 139 * context.
114 */ 140 */
115static inline bool on_accessible_stack(struct task_struct *tsk, 141static inline bool on_accessible_stack(const struct task_struct *tsk,
116 unsigned long sp, 142 unsigned long sp,
117 struct stack_info *info) 143 struct stack_info *info)
118{ 144{
145 if (info)
146 info->type = STACK_TYPE_UNKNOWN;
147
119 if (on_task_stack(tsk, sp, info)) 148 if (on_task_stack(tsk, sp, info))
120 return true; 149 return true;
121 if (tsk != current || preemptible()) 150 if (tsk != current || preemptible())
@@ -130,4 +159,27 @@ static inline bool on_accessible_stack(struct task_struct *tsk,
130 return false; 159 return false;
131} 160}
132 161
162static inline void start_backtrace(struct stackframe *frame,
163 unsigned long fp, unsigned long pc)
164{
165 frame->fp = fp;
166 frame->pc = pc;
167#ifdef CONFIG_FUNCTION_GRAPH_TRACER
168 frame->graph = 0;
169#endif
170
171 /*
172 * Prime the first unwind.
173 *
174 * In unwind_frame() we'll check that the FP points to a valid stack,
175 * which can't be STACK_TYPE_UNKNOWN, and the first unwind will be
176 * treated as a transition to whichever stack that happens to be. The
177 * prev_fp value won't be used, but we set it to 0 such that it is
178 * definitely not an accessible stack address.
179 */
180 bitmap_zero(frame->stacks_done, __NR_STACK_TYPES);
181 frame->prev_fp = 0;
182 frame->prev_type = STACK_TYPE_UNKNOWN;
183}
184
133#endif /* __ASM_STACKTRACE_H */ 185#endif /* __ASM_STACKTRACE_H */
diff --git a/arch/arm64/include/asm/vdso/compat_gettimeofday.h b/arch/arm64/include/asm/vdso/compat_gettimeofday.h
index f4812777f5c5..c50ee1b7d5cd 100644
--- a/arch/arm64/include/asm/vdso/compat_gettimeofday.h
+++ b/arch/arm64/include/asm/vdso/compat_gettimeofday.h
@@ -16,6 +16,8 @@
16 16
17#define VDSO_HAS_CLOCK_GETRES 1 17#define VDSO_HAS_CLOCK_GETRES 1
18 18
19#define VDSO_HAS_32BIT_FALLBACK 1
20
19static __always_inline 21static __always_inline
20int gettimeofday_fallback(struct __kernel_old_timeval *_tv, 22int gettimeofday_fallback(struct __kernel_old_timeval *_tv,
21 struct timezone *_tz) 23 struct timezone *_tz)
@@ -52,6 +54,23 @@ long clock_gettime_fallback(clockid_t _clkid, struct __kernel_timespec *_ts)
52} 54}
53 55
54static __always_inline 56static __always_inline
57long clock_gettime32_fallback(clockid_t _clkid, struct old_timespec32 *_ts)
58{
59 register struct old_timespec32 *ts asm("r1") = _ts;
60 register clockid_t clkid asm("r0") = _clkid;
61 register long ret asm ("r0");
62 register long nr asm("r7") = __NR_compat_clock_gettime;
63
64 asm volatile(
65 " swi #0\n"
66 : "=r" (ret)
67 : "r" (clkid), "r" (ts), "r" (nr)
68 : "memory");
69
70 return ret;
71}
72
73static __always_inline
55int clock_getres_fallback(clockid_t _clkid, struct __kernel_timespec *_ts) 74int clock_getres_fallback(clockid_t _clkid, struct __kernel_timespec *_ts)
56{ 75{
57 register struct __kernel_timespec *ts asm("r1") = _ts; 76 register struct __kernel_timespec *ts asm("r1") = _ts;
@@ -72,6 +91,27 @@ int clock_getres_fallback(clockid_t _clkid, struct __kernel_timespec *_ts)
72 return ret; 91 return ret;
73} 92}
74 93
94static __always_inline
95int clock_getres32_fallback(clockid_t _clkid, struct old_timespec32 *_ts)
96{
97 register struct old_timespec32 *ts asm("r1") = _ts;
98 register clockid_t clkid asm("r0") = _clkid;
99 register long ret asm ("r0");
100 register long nr asm("r7") = __NR_compat_clock_getres;
101
102 /* The checks below are required for ABI consistency with arm */
103 if ((_clkid >= MAX_CLOCKS) && (_ts == NULL))
104 return -EINVAL;
105
106 asm volatile(
107 " swi #0\n"
108 : "=r" (ret)
109 : "r" (clkid), "r" (ts), "r" (nr)
110 : "memory");
111
112 return ret;
113}
114
75static __always_inline u64 __arch_get_hw_counter(s32 clock_mode) 115static __always_inline u64 __arch_get_hw_counter(s32 clock_mode)
76{ 116{
77 u64 res; 117 u64 res;
diff --git a/arch/arm64/include/uapi/asm/bpf_perf_event.h b/arch/arm64/include/uapi/asm/bpf_perf_event.h
index b551b741653d..5e1e648aeec4 100644
--- a/arch/arm64/include/uapi/asm/bpf_perf_event.h
+++ b/arch/arm64/include/uapi/asm/bpf_perf_event.h
@@ -1,4 +1,4 @@
1/* SPDX-License-Identifier: GPL-2.0 */ 1/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
2#ifndef _UAPI__ASM_BPF_PERF_EVENT_H__ 2#ifndef _UAPI__ASM_BPF_PERF_EVENT_H__
3#define _UAPI__ASM_BPF_PERF_EVENT_H__ 3#define _UAPI__ASM_BPF_PERF_EVENT_H__
4 4
diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
index f29f36a65175..b1fdc486aed8 100644
--- a/arch/arm64/kernel/cpufeature.c
+++ b/arch/arm64/kernel/cpufeature.c
@@ -184,9 +184,17 @@ static const struct arm64_ftr_bits ftr_id_aa64zfr0[] = {
184}; 184};
185 185
186static const struct arm64_ftr_bits ftr_id_aa64mmfr0[] = { 186static const struct arm64_ftr_bits ftr_id_aa64mmfr0[] = {
187 S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_TGRAN4_SHIFT, 4, ID_AA64MMFR0_TGRAN4_NI), 187 /*
188 S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_TGRAN64_SHIFT, 4, ID_AA64MMFR0_TGRAN64_NI), 188 * We already refuse to boot CPUs that don't support our configured
189 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_TGRAN16_SHIFT, 4, ID_AA64MMFR0_TGRAN16_NI), 189 * page size, so we can only detect mismatches for a page size other
190 * than the one we're currently using. Unfortunately, SoCs like this
191 * exist in the wild so, even though we don't like it, we'll have to go
192 * along with it and treat them as non-strict.
193 */
194 S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_TGRAN4_SHIFT, 4, ID_AA64MMFR0_TGRAN4_NI),
195 S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_TGRAN64_SHIFT, 4, ID_AA64MMFR0_TGRAN64_NI),
196 ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_TGRAN16_SHIFT, 4, ID_AA64MMFR0_TGRAN16_NI),
197
190 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_BIGENDEL0_SHIFT, 4, 0), 198 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_BIGENDEL0_SHIFT, 4, 0),
191 /* Linux shouldn't care about secure memory */ 199 /* Linux shouldn't care about secure memory */
192 ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_SNSMEM_SHIFT, 4, 0), 200 ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_SNSMEM_SHIFT, 4, 0),
@@ -225,8 +233,8 @@ static const struct arm64_ftr_bits ftr_ctr[] = {
225 ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_EXACT, 31, 1, 1), /* RES1 */ 233 ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_EXACT, 31, 1, 1), /* RES1 */
226 ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, CTR_DIC_SHIFT, 1, 1), 234 ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, CTR_DIC_SHIFT, 1, 1),
227 ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, CTR_IDC_SHIFT, 1, 1), 235 ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, CTR_IDC_SHIFT, 1, 1),
228 ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_HIGHER_SAFE, CTR_CWG_SHIFT, 4, 0), 236 ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_HIGHER_OR_ZERO_SAFE, CTR_CWG_SHIFT, 4, 0),
229 ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_HIGHER_SAFE, CTR_ERG_SHIFT, 4, 0), 237 ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_HIGHER_OR_ZERO_SAFE, CTR_ERG_SHIFT, 4, 0),
230 ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, CTR_DMINLINE_SHIFT, 4, 1), 238 ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, CTR_DMINLINE_SHIFT, 4, 1),
231 /* 239 /*
232 * Linux can handle differing I-cache policies. Userspace JITs will 240 * Linux can handle differing I-cache policies. Userspace JITs will
@@ -468,6 +476,10 @@ static s64 arm64_ftr_safe_value(const struct arm64_ftr_bits *ftrp, s64 new,
468 case FTR_LOWER_SAFE: 476 case FTR_LOWER_SAFE:
469 ret = new < cur ? new : cur; 477 ret = new < cur ? new : cur;
470 break; 478 break;
479 case FTR_HIGHER_OR_ZERO_SAFE:
480 if (!cur || !new)
481 break;
482 /* Fallthrough */
471 case FTR_HIGHER_SAFE: 483 case FTR_HIGHER_SAFE:
472 ret = new > cur ? new : cur; 484 ret = new > cur ? new : cur;
473 break; 485 break;
diff --git a/arch/arm64/kernel/debug-monitors.c b/arch/arm64/kernel/debug-monitors.c
index f8719bd30850..48222a4760c2 100644
--- a/arch/arm64/kernel/debug-monitors.c
+++ b/arch/arm64/kernel/debug-monitors.c
@@ -207,16 +207,16 @@ static int call_step_hook(struct pt_regs *regs, unsigned int esr)
207 207
208 list = user_mode(regs) ? &user_step_hook : &kernel_step_hook; 208 list = user_mode(regs) ? &user_step_hook : &kernel_step_hook;
209 209
210 rcu_read_lock(); 210 /*
211 211 * Since single-step exception disables interrupt, this function is
212 * entirely not preemptible, and we can use rcu list safely here.
213 */
212 list_for_each_entry_rcu(hook, list, node) { 214 list_for_each_entry_rcu(hook, list, node) {
213 retval = hook->fn(regs, esr); 215 retval = hook->fn(regs, esr);
214 if (retval == DBG_HOOK_HANDLED) 216 if (retval == DBG_HOOK_HANDLED)
215 break; 217 break;
216 } 218 }
217 219
218 rcu_read_unlock();
219
220 return retval; 220 return retval;
221} 221}
222NOKPROBE_SYMBOL(call_step_hook); 222NOKPROBE_SYMBOL(call_step_hook);
@@ -305,14 +305,16 @@ static int call_break_hook(struct pt_regs *regs, unsigned int esr)
305 305
306 list = user_mode(regs) ? &user_break_hook : &kernel_break_hook; 306 list = user_mode(regs) ? &user_break_hook : &kernel_break_hook;
307 307
308 rcu_read_lock(); 308 /*
309 * Since brk exception disables interrupt, this function is
310 * entirely not preemptible, and we can use rcu list safely here.
311 */
309 list_for_each_entry_rcu(hook, list, node) { 312 list_for_each_entry_rcu(hook, list, node) {
310 unsigned int comment = esr & ESR_ELx_BRK64_ISS_COMMENT_MASK; 313 unsigned int comment = esr & ESR_ELx_BRK64_ISS_COMMENT_MASK;
311 314
312 if ((comment & ~hook->mask) == hook->imm) 315 if ((comment & ~hook->mask) == hook->imm)
313 fn = hook->fn; 316 fn = hook->fn;
314 } 317 }
315 rcu_read_unlock();
316 318
317 return fn ? fn(regs, esr) : DBG_HOOK_ERROR; 319 return fn ? fn(regs, esr) : DBG_HOOK_ERROR;
318} 320}
diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
index 9cdc4592da3e..320a30dbe35e 100644
--- a/arch/arm64/kernel/entry.S
+++ b/arch/arm64/kernel/entry.S
@@ -586,10 +586,8 @@ el1_sync:
586 b.eq el1_ia 586 b.eq el1_ia
587 cmp x24, #ESR_ELx_EC_SYS64 // configurable trap 587 cmp x24, #ESR_ELx_EC_SYS64 // configurable trap
588 b.eq el1_undef 588 b.eq el1_undef
589 cmp x24, #ESR_ELx_EC_SP_ALIGN // stack alignment exception
590 b.eq el1_sp_pc
591 cmp x24, #ESR_ELx_EC_PC_ALIGN // pc alignment exception 589 cmp x24, #ESR_ELx_EC_PC_ALIGN // pc alignment exception
592 b.eq el1_sp_pc 590 b.eq el1_pc
593 cmp x24, #ESR_ELx_EC_UNKNOWN // unknown exception in EL1 591 cmp x24, #ESR_ELx_EC_UNKNOWN // unknown exception in EL1
594 b.eq el1_undef 592 b.eq el1_undef
595 cmp x24, #ESR_ELx_EC_BREAKPT_CUR // debug exception in EL1 593 cmp x24, #ESR_ELx_EC_BREAKPT_CUR // debug exception in EL1
@@ -611,9 +609,11 @@ el1_da:
611 bl do_mem_abort 609 bl do_mem_abort
612 610
613 kernel_exit 1 611 kernel_exit 1
614el1_sp_pc: 612el1_pc:
615 /* 613 /*
616 * Stack or PC alignment exception handling 614 * PC alignment exception handling. We don't handle SP alignment faults,
615 * since we will have hit a recursive exception when trying to push the
616 * initial pt_regs.
617 */ 617 */
618 mrs x0, far_el1 618 mrs x0, far_el1
619 inherit_daif pstate=x23, tmp=x2 619 inherit_daif pstate=x23, tmp=x2
@@ -732,9 +732,9 @@ el0_sync:
732 ccmp x24, #ESR_ELx_EC_WFx, #4, ne 732 ccmp x24, #ESR_ELx_EC_WFx, #4, ne
733 b.eq el0_sys 733 b.eq el0_sys
734 cmp x24, #ESR_ELx_EC_SP_ALIGN // stack alignment exception 734 cmp x24, #ESR_ELx_EC_SP_ALIGN // stack alignment exception
735 b.eq el0_sp_pc 735 b.eq el0_sp
736 cmp x24, #ESR_ELx_EC_PC_ALIGN // pc alignment exception 736 cmp x24, #ESR_ELx_EC_PC_ALIGN // pc alignment exception
737 b.eq el0_sp_pc 737 b.eq el0_pc
738 cmp x24, #ESR_ELx_EC_UNKNOWN // unknown exception in EL0 738 cmp x24, #ESR_ELx_EC_UNKNOWN // unknown exception in EL0
739 b.eq el0_undef 739 b.eq el0_undef
740 cmp x24, #ESR_ELx_EC_BREAKPT_LOW // debug exception in EL0 740 cmp x24, #ESR_ELx_EC_BREAKPT_LOW // debug exception in EL0
@@ -758,7 +758,7 @@ el0_sync_compat:
758 cmp x24, #ESR_ELx_EC_FP_EXC32 // FP/ASIMD exception 758 cmp x24, #ESR_ELx_EC_FP_EXC32 // FP/ASIMD exception
759 b.eq el0_fpsimd_exc 759 b.eq el0_fpsimd_exc
760 cmp x24, #ESR_ELx_EC_PC_ALIGN // pc alignment exception 760 cmp x24, #ESR_ELx_EC_PC_ALIGN // pc alignment exception
761 b.eq el0_sp_pc 761 b.eq el0_pc
762 cmp x24, #ESR_ELx_EC_UNKNOWN // unknown exception in EL0 762 cmp x24, #ESR_ELx_EC_UNKNOWN // unknown exception in EL0
763 b.eq el0_undef 763 b.eq el0_undef
764 cmp x24, #ESR_ELx_EC_CP15_32 // CP15 MRC/MCR trap 764 cmp x24, #ESR_ELx_EC_CP15_32 // CP15 MRC/MCR trap
@@ -858,11 +858,15 @@ el0_fpsimd_exc:
858 mov x1, sp 858 mov x1, sp
859 bl do_fpsimd_exc 859 bl do_fpsimd_exc
860 b ret_to_user 860 b ret_to_user
861el0_sp:
862 ldr x26, [sp, #S_SP]
863 b el0_sp_pc
864el0_pc:
865 mrs x26, far_el1
861el0_sp_pc: 866el0_sp_pc:
862 /* 867 /*
863 * Stack or PC alignment exception handling 868 * Stack or PC alignment exception handling
864 */ 869 */
865 mrs x26, far_el1
866 gic_prio_kentry_setup tmp=x0 870 gic_prio_kentry_setup tmp=x0
867 enable_da_f 871 enable_da_f
868#ifdef CONFIG_TRACE_IRQFLAGS 872#ifdef CONFIG_TRACE_IRQFLAGS
diff --git a/arch/arm64/kernel/fpsimd.c b/arch/arm64/kernel/fpsimd.c
index eec4776ae5f0..37d3912cfe06 100644
--- a/arch/arm64/kernel/fpsimd.c
+++ b/arch/arm64/kernel/fpsimd.c
@@ -406,6 +406,18 @@ static __uint128_t arm64_cpu_to_le128(__uint128_t x)
406 406
407#define arm64_le128_to_cpu(x) arm64_cpu_to_le128(x) 407#define arm64_le128_to_cpu(x) arm64_cpu_to_le128(x)
408 408
409static void __fpsimd_to_sve(void *sst, struct user_fpsimd_state const *fst,
410 unsigned int vq)
411{
412 unsigned int i;
413 __uint128_t *p;
414
415 for (i = 0; i < SVE_NUM_ZREGS; ++i) {
416 p = (__uint128_t *)ZREG(sst, vq, i);
417 *p = arm64_cpu_to_le128(fst->vregs[i]);
418 }
419}
420
409/* 421/*
410 * Transfer the FPSIMD state in task->thread.uw.fpsimd_state to 422 * Transfer the FPSIMD state in task->thread.uw.fpsimd_state to
411 * task->thread.sve_state. 423 * task->thread.sve_state.
@@ -423,17 +435,12 @@ static void fpsimd_to_sve(struct task_struct *task)
423 unsigned int vq; 435 unsigned int vq;
424 void *sst = task->thread.sve_state; 436 void *sst = task->thread.sve_state;
425 struct user_fpsimd_state const *fst = &task->thread.uw.fpsimd_state; 437 struct user_fpsimd_state const *fst = &task->thread.uw.fpsimd_state;
426 unsigned int i;
427 __uint128_t *p;
428 438
429 if (!system_supports_sve()) 439 if (!system_supports_sve())
430 return; 440 return;
431 441
432 vq = sve_vq_from_vl(task->thread.sve_vl); 442 vq = sve_vq_from_vl(task->thread.sve_vl);
433 for (i = 0; i < 32; ++i) { 443 __fpsimd_to_sve(sst, fst, vq);
434 p = (__uint128_t *)ZREG(sst, vq, i);
435 *p = arm64_cpu_to_le128(fst->vregs[i]);
436 }
437} 444}
438 445
439/* 446/*
@@ -459,7 +466,7 @@ static void sve_to_fpsimd(struct task_struct *task)
459 return; 466 return;
460 467
461 vq = sve_vq_from_vl(task->thread.sve_vl); 468 vq = sve_vq_from_vl(task->thread.sve_vl);
462 for (i = 0; i < 32; ++i) { 469 for (i = 0; i < SVE_NUM_ZREGS; ++i) {
463 p = (__uint128_t const *)ZREG(sst, vq, i); 470 p = (__uint128_t const *)ZREG(sst, vq, i);
464 fst->vregs[i] = arm64_le128_to_cpu(*p); 471 fst->vregs[i] = arm64_le128_to_cpu(*p);
465 } 472 }
@@ -550,8 +557,6 @@ void sve_sync_from_fpsimd_zeropad(struct task_struct *task)
550 unsigned int vq; 557 unsigned int vq;
551 void *sst = task->thread.sve_state; 558 void *sst = task->thread.sve_state;
552 struct user_fpsimd_state const *fst = &task->thread.uw.fpsimd_state; 559 struct user_fpsimd_state const *fst = &task->thread.uw.fpsimd_state;
553 unsigned int i;
554 __uint128_t *p;
555 560
556 if (!test_tsk_thread_flag(task, TIF_SVE)) 561 if (!test_tsk_thread_flag(task, TIF_SVE))
557 return; 562 return;
@@ -559,11 +564,7 @@ void sve_sync_from_fpsimd_zeropad(struct task_struct *task)
559 vq = sve_vq_from_vl(task->thread.sve_vl); 564 vq = sve_vq_from_vl(task->thread.sve_vl);
560 565
561 memset(sst, 0, SVE_SIG_REGS_SIZE(vq)); 566 memset(sst, 0, SVE_SIG_REGS_SIZE(vq));
562 567 __fpsimd_to_sve(sst, fst, vq);
563 for (i = 0; i < 32; ++i) {
564 p = (__uint128_t *)ZREG(sst, vq, i);
565 *p = arm64_cpu_to_le128(fst->vregs[i]);
566 }
567} 568}
568 569
569int sve_set_vector_length(struct task_struct *task, 570int sve_set_vector_length(struct task_struct *task,
diff --git a/arch/arm64/kernel/ftrace.c b/arch/arm64/kernel/ftrace.c
index 1285c7b2947f..171773257974 100644
--- a/arch/arm64/kernel/ftrace.c
+++ b/arch/arm64/kernel/ftrace.c
@@ -73,7 +73,7 @@ int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
73 73
74 if (offset < -SZ_128M || offset >= SZ_128M) { 74 if (offset < -SZ_128M || offset >= SZ_128M) {
75#ifdef CONFIG_ARM64_MODULE_PLTS 75#ifdef CONFIG_ARM64_MODULE_PLTS
76 struct plt_entry trampoline; 76 struct plt_entry trampoline, *dst;
77 struct module *mod; 77 struct module *mod;
78 78
79 /* 79 /*
@@ -106,23 +106,27 @@ int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
106 * to check if the actual opcodes are in fact identical, 106 * to check if the actual opcodes are in fact identical,
107 * regardless of the offset in memory so use memcmp() instead. 107 * regardless of the offset in memory so use memcmp() instead.
108 */ 108 */
109 trampoline = get_plt_entry(addr, mod->arch.ftrace_trampoline); 109 dst = mod->arch.ftrace_trampoline;
110 if (memcmp(mod->arch.ftrace_trampoline, &trampoline, 110 trampoline = get_plt_entry(addr, dst);
111 sizeof(trampoline))) { 111 if (memcmp(dst, &trampoline, sizeof(trampoline))) {
112 if (plt_entry_is_initialized(mod->arch.ftrace_trampoline)) { 112 if (plt_entry_is_initialized(dst)) {
113 pr_err("ftrace: far branches to multiple entry points unsupported inside a single module\n"); 113 pr_err("ftrace: far branches to multiple entry points unsupported inside a single module\n");
114 return -EINVAL; 114 return -EINVAL;
115 } 115 }
116 116
117 /* point the trampoline to our ftrace entry point */ 117 /* point the trampoline to our ftrace entry point */
118 module_disable_ro(mod); 118 module_disable_ro(mod);
119 *mod->arch.ftrace_trampoline = trampoline; 119 *dst = trampoline;
120 module_enable_ro(mod, true); 120 module_enable_ro(mod, true);
121 121
122 /* update trampoline before patching in the branch */ 122 /*
123 smp_wmb(); 123 * Ensure updated trampoline is visible to instruction
124 * fetch before we patch in the branch.
125 */
126 __flush_icache_range((unsigned long)&dst[0],
127 (unsigned long)&dst[1]);
124 } 128 }
125 addr = (unsigned long)(void *)mod->arch.ftrace_trampoline; 129 addr = (unsigned long)dst;
126#else /* CONFIG_ARM64_MODULE_PLTS */ 130#else /* CONFIG_ARM64_MODULE_PLTS */
127 return -EINVAL; 131 return -EINVAL;
128#endif /* CONFIG_ARM64_MODULE_PLTS */ 132#endif /* CONFIG_ARM64_MODULE_PLTS */
diff --git a/arch/arm64/kernel/hw_breakpoint.c b/arch/arm64/kernel/hw_breakpoint.c
index dceb84520948..38ee1514cd9c 100644
--- a/arch/arm64/kernel/hw_breakpoint.c
+++ b/arch/arm64/kernel/hw_breakpoint.c
@@ -536,13 +536,18 @@ int hw_breakpoint_arch_parse(struct perf_event *bp,
536 /* Aligned */ 536 /* Aligned */
537 break; 537 break;
538 case 1: 538 case 1:
539 /* Allow single byte watchpoint. */
540 if (hw->ctrl.len == ARM_BREAKPOINT_LEN_1)
541 break;
542 case 2: 539 case 2:
543 /* Allow halfword watchpoints and breakpoints. */ 540 /* Allow halfword watchpoints and breakpoints. */
544 if (hw->ctrl.len == ARM_BREAKPOINT_LEN_2) 541 if (hw->ctrl.len == ARM_BREAKPOINT_LEN_2)
545 break; 542 break;
543
544 /* Fallthrough */
545 case 3:
546 /* Allow single byte watchpoint. */
547 if (hw->ctrl.len == ARM_BREAKPOINT_LEN_1)
548 break;
549
550 /* Fallthrough */
546 default: 551 default:
547 return -EINVAL; 552 return -EINVAL;
548 } 553 }
diff --git a/arch/arm64/kernel/module.c b/arch/arm64/kernel/module.c
index 46e643e30708..03ff15bffbb6 100644
--- a/arch/arm64/kernel/module.c
+++ b/arch/arm64/kernel/module.c
@@ -314,18 +314,21 @@ int apply_relocate_add(Elf64_Shdr *sechdrs,
314 /* MOVW instruction relocations. */ 314 /* MOVW instruction relocations. */
315 case R_AARCH64_MOVW_UABS_G0_NC: 315 case R_AARCH64_MOVW_UABS_G0_NC:
316 overflow_check = false; 316 overflow_check = false;
317 /* Fall through */
317 case R_AARCH64_MOVW_UABS_G0: 318 case R_AARCH64_MOVW_UABS_G0:
318 ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 0, 319 ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 0,
319 AARCH64_INSN_IMM_MOVKZ); 320 AARCH64_INSN_IMM_MOVKZ);
320 break; 321 break;
321 case R_AARCH64_MOVW_UABS_G1_NC: 322 case R_AARCH64_MOVW_UABS_G1_NC:
322 overflow_check = false; 323 overflow_check = false;
324 /* Fall through */
323 case R_AARCH64_MOVW_UABS_G1: 325 case R_AARCH64_MOVW_UABS_G1:
324 ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 16, 326 ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 16,
325 AARCH64_INSN_IMM_MOVKZ); 327 AARCH64_INSN_IMM_MOVKZ);
326 break; 328 break;
327 case R_AARCH64_MOVW_UABS_G2_NC: 329 case R_AARCH64_MOVW_UABS_G2_NC:
328 overflow_check = false; 330 overflow_check = false;
331 /* Fall through */
329 case R_AARCH64_MOVW_UABS_G2: 332 case R_AARCH64_MOVW_UABS_G2:
330 ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 32, 333 ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 32,
331 AARCH64_INSN_IMM_MOVKZ); 334 AARCH64_INSN_IMM_MOVKZ);
@@ -393,6 +396,7 @@ int apply_relocate_add(Elf64_Shdr *sechdrs,
393 break; 396 break;
394 case R_AARCH64_ADR_PREL_PG_HI21_NC: 397 case R_AARCH64_ADR_PREL_PG_HI21_NC:
395 overflow_check = false; 398 overflow_check = false;
399 /* Fall through */
396 case R_AARCH64_ADR_PREL_PG_HI21: 400 case R_AARCH64_ADR_PREL_PG_HI21:
397 ovf = reloc_insn_adrp(me, sechdrs, loc, val); 401 ovf = reloc_insn_adrp(me, sechdrs, loc, val);
398 if (ovf && ovf != -ERANGE) 402 if (ovf && ovf != -ERANGE)
diff --git a/arch/arm64/kernel/perf_callchain.c b/arch/arm64/kernel/perf_callchain.c
index 9d63514b9836..b0e03e052dd1 100644
--- a/arch/arm64/kernel/perf_callchain.c
+++ b/arch/arm64/kernel/perf_callchain.c
@@ -154,12 +154,7 @@ void perf_callchain_kernel(struct perf_callchain_entry_ctx *entry,
154 return; 154 return;
155 } 155 }
156 156
157 frame.fp = regs->regs[29]; 157 start_backtrace(&frame, regs->regs[29], regs->pc);
158 frame.pc = regs->pc;
159#ifdef CONFIG_FUNCTION_GRAPH_TRACER
160 frame.graph = 0;
161#endif
162
163 walk_stackframe(current, &frame, callchain_trace, entry); 158 walk_stackframe(current, &frame, callchain_trace, entry);
164} 159}
165 160
diff --git a/arch/arm64/kernel/probes/kprobes.c b/arch/arm64/kernel/probes/kprobes.c
index bd5dfffca272..c4452827419b 100644
--- a/arch/arm64/kernel/probes/kprobes.c
+++ b/arch/arm64/kernel/probes/kprobes.c
@@ -21,6 +21,7 @@
21#include <asm/ptrace.h> 21#include <asm/ptrace.h>
22#include <asm/cacheflush.h> 22#include <asm/cacheflush.h>
23#include <asm/debug-monitors.h> 23#include <asm/debug-monitors.h>
24#include <asm/daifflags.h>
24#include <asm/system_misc.h> 25#include <asm/system_misc.h>
25#include <asm/insn.h> 26#include <asm/insn.h>
26#include <linux/uaccess.h> 27#include <linux/uaccess.h>
@@ -168,33 +169,6 @@ static void __kprobes set_current_kprobe(struct kprobe *p)
168} 169}
169 170
170/* 171/*
171 * When PSTATE.D is set (masked), then software step exceptions can not be
172 * generated.
173 * SPSR's D bit shows the value of PSTATE.D immediately before the
174 * exception was taken. PSTATE.D is set while entering into any exception
175 * mode, however software clears it for any normal (none-debug-exception)
176 * mode in the exception entry. Therefore, when we are entering into kprobe
177 * breakpoint handler from any normal mode then SPSR.D bit is already
178 * cleared, however it is set when we are entering from any debug exception
179 * mode.
180 * Since we always need to generate single step exception after a kprobe
181 * breakpoint exception therefore we need to clear it unconditionally, when
182 * we become sure that the current breakpoint exception is for kprobe.
183 */
184static void __kprobes
185spsr_set_debug_flag(struct pt_regs *regs, int mask)
186{
187 unsigned long spsr = regs->pstate;
188
189 if (mask)
190 spsr |= PSR_D_BIT;
191 else
192 spsr &= ~PSR_D_BIT;
193
194 regs->pstate = spsr;
195}
196
197/*
198 * Interrupts need to be disabled before single-step mode is set, and not 172 * Interrupts need to be disabled before single-step mode is set, and not
199 * reenabled until after single-step mode ends. 173 * reenabled until after single-step mode ends.
200 * Without disabling interrupt on local CPU, there is a chance of 174 * Without disabling interrupt on local CPU, there is a chance of
@@ -205,17 +179,17 @@ spsr_set_debug_flag(struct pt_regs *regs, int mask)
205static void __kprobes kprobes_save_local_irqflag(struct kprobe_ctlblk *kcb, 179static void __kprobes kprobes_save_local_irqflag(struct kprobe_ctlblk *kcb,
206 struct pt_regs *regs) 180 struct pt_regs *regs)
207{ 181{
208 kcb->saved_irqflag = regs->pstate; 182 kcb->saved_irqflag = regs->pstate & DAIF_MASK;
209 regs->pstate |= PSR_I_BIT; 183 regs->pstate |= PSR_I_BIT;
184 /* Unmask PSTATE.D for enabling software step exceptions. */
185 regs->pstate &= ~PSR_D_BIT;
210} 186}
211 187
212static void __kprobes kprobes_restore_local_irqflag(struct kprobe_ctlblk *kcb, 188static void __kprobes kprobes_restore_local_irqflag(struct kprobe_ctlblk *kcb,
213 struct pt_regs *regs) 189 struct pt_regs *regs)
214{ 190{
215 if (kcb->saved_irqflag & PSR_I_BIT) 191 regs->pstate &= ~DAIF_MASK;
216 regs->pstate |= PSR_I_BIT; 192 regs->pstate |= kcb->saved_irqflag;
217 else
218 regs->pstate &= ~PSR_I_BIT;
219} 193}
220 194
221static void __kprobes 195static void __kprobes
@@ -252,8 +226,6 @@ static void __kprobes setup_singlestep(struct kprobe *p,
252 226
253 set_ss_context(kcb, slot); /* mark pending ss */ 227 set_ss_context(kcb, slot); /* mark pending ss */
254 228
255 spsr_set_debug_flag(regs, 0);
256
257 /* IRQs and single stepping do not mix well. */ 229 /* IRQs and single stepping do not mix well. */
258 kprobes_save_local_irqflag(kcb, regs); 230 kprobes_save_local_irqflag(kcb, regs);
259 kernel_enable_single_step(regs); 231 kernel_enable_single_step(regs);
diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c
index 6a869d9f304f..f674f28df663 100644
--- a/arch/arm64/kernel/process.c
+++ b/arch/arm64/kernel/process.c
@@ -398,7 +398,7 @@ int copy_thread(unsigned long clone_flags, unsigned long stack_start,
398 childregs->pstate |= PSR_UAO_BIT; 398 childregs->pstate |= PSR_UAO_BIT;
399 399
400 if (arm64_get_ssbd_state() == ARM64_SSBD_FORCE_DISABLE) 400 if (arm64_get_ssbd_state() == ARM64_SSBD_FORCE_DISABLE)
401 childregs->pstate |= PSR_SSBS_BIT; 401 set_ssbs_bit(childregs);
402 402
403 if (system_uses_irq_prio_masking()) 403 if (system_uses_irq_prio_masking())
404 childregs->pmr_save = GIC_PRIO_IRQON; 404 childregs->pmr_save = GIC_PRIO_IRQON;
@@ -443,6 +443,32 @@ void uao_thread_switch(struct task_struct *next)
443} 443}
444 444
445/* 445/*
446 * Force SSBS state on context-switch, since it may be lost after migrating
447 * from a CPU which treats the bit as RES0 in a heterogeneous system.
448 */
449static void ssbs_thread_switch(struct task_struct *next)
450{
451 struct pt_regs *regs = task_pt_regs(next);
452
453 /*
454 * Nothing to do for kernel threads, but 'regs' may be junk
455 * (e.g. idle task) so check the flags and bail early.
456 */
457 if (unlikely(next->flags & PF_KTHREAD))
458 return;
459
460 /* If the mitigation is enabled, then we leave SSBS clear. */
461 if ((arm64_get_ssbd_state() == ARM64_SSBD_FORCE_ENABLE) ||
462 test_tsk_thread_flag(next, TIF_SSBD))
463 return;
464
465 if (compat_user_mode(regs))
466 set_compat_ssbs_bit(regs);
467 else if (user_mode(regs))
468 set_ssbs_bit(regs);
469}
470
471/*
446 * We store our current task in sp_el0, which is clobbered by userspace. Keep a 472 * We store our current task in sp_el0, which is clobbered by userspace. Keep a
447 * shadow copy so that we can restore this upon entry from userspace. 473 * shadow copy so that we can restore this upon entry from userspace.
448 * 474 *
@@ -471,6 +497,7 @@ __notrace_funcgraph struct task_struct *__switch_to(struct task_struct *prev,
471 entry_task_switch(next); 497 entry_task_switch(next);
472 uao_thread_switch(next); 498 uao_thread_switch(next);
473 ptrauth_thread_switch(next); 499 ptrauth_thread_switch(next);
500 ssbs_thread_switch(next);
474 501
475 /* 502 /*
476 * Complete any pending TLB or cache maintenance on this CPU in case 503 * Complete any pending TLB or cache maintenance on this CPU in case
@@ -498,11 +525,8 @@ unsigned long get_wchan(struct task_struct *p)
498 if (!stack_page) 525 if (!stack_page)
499 return 0; 526 return 0;
500 527
501 frame.fp = thread_saved_fp(p); 528 start_backtrace(&frame, thread_saved_fp(p), thread_saved_pc(p));
502 frame.pc = thread_saved_pc(p); 529
503#ifdef CONFIG_FUNCTION_GRAPH_TRACER
504 frame.graph = 0;
505#endif
506 do { 530 do {
507 if (unwind_frame(p, &frame)) 531 if (unwind_frame(p, &frame))
508 goto out; 532 goto out;
diff --git a/arch/arm64/kernel/return_address.c b/arch/arm64/kernel/return_address.c
index b21cba90f82d..a5e8b3b9d798 100644
--- a/arch/arm64/kernel/return_address.c
+++ b/arch/arm64/kernel/return_address.c
@@ -8,6 +8,7 @@
8 8
9#include <linux/export.h> 9#include <linux/export.h>
10#include <linux/ftrace.h> 10#include <linux/ftrace.h>
11#include <linux/kprobes.h>
11 12
12#include <asm/stack_pointer.h> 13#include <asm/stack_pointer.h>
13#include <asm/stacktrace.h> 14#include <asm/stacktrace.h>
@@ -29,6 +30,7 @@ static int save_return_addr(struct stackframe *frame, void *d)
29 return 0; 30 return 0;
30 } 31 }
31} 32}
33NOKPROBE_SYMBOL(save_return_addr);
32 34
33void *return_address(unsigned int level) 35void *return_address(unsigned int level)
34{ 36{
@@ -38,12 +40,9 @@ void *return_address(unsigned int level)
38 data.level = level + 2; 40 data.level = level + 2;
39 data.addr = NULL; 41 data.addr = NULL;
40 42
41 frame.fp = (unsigned long)__builtin_frame_address(0); 43 start_backtrace(&frame,
42 frame.pc = (unsigned long)return_address; /* dummy */ 44 (unsigned long)__builtin_frame_address(0),
43#ifdef CONFIG_FUNCTION_GRAPH_TRACER 45 (unsigned long)return_address);
44 frame.graph = 0;
45#endif
46
47 walk_stackframe(current, &frame, save_return_addr, &data); 46 walk_stackframe(current, &frame, save_return_addr, &data);
48 47
49 if (!data.level) 48 if (!data.level)
@@ -52,3 +51,4 @@ void *return_address(unsigned int level)
52 return NULL; 51 return NULL;
53} 52}
54EXPORT_SYMBOL_GPL(return_address); 53EXPORT_SYMBOL_GPL(return_address);
54NOKPROBE_SYMBOL(return_address);
diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c
index ea90d3bd9253..018a33e01b0e 100644
--- a/arch/arm64/kernel/smp.c
+++ b/arch/arm64/kernel/smp.c
@@ -152,8 +152,8 @@ int __cpu_up(unsigned int cpu, struct task_struct *idle)
152 pr_crit("CPU%u: died during early boot\n", cpu); 152 pr_crit("CPU%u: died during early boot\n", cpu);
153 break; 153 break;
154 } 154 }
155 /* Fall through */
156 pr_crit("CPU%u: may not have shut down cleanly\n", cpu); 155 pr_crit("CPU%u: may not have shut down cleanly\n", cpu);
156 /* Fall through */
157 case CPU_STUCK_IN_KERNEL: 157 case CPU_STUCK_IN_KERNEL:
158 pr_crit("CPU%u: is stuck in kernel\n", cpu); 158 pr_crit("CPU%u: is stuck in kernel\n", cpu);
159 if (status & CPU_STUCK_REASON_52_BIT_VA) 159 if (status & CPU_STUCK_REASON_52_BIT_VA)
diff --git a/arch/arm64/kernel/stacktrace.c b/arch/arm64/kernel/stacktrace.c
index 62d395151abe..a336cb124320 100644
--- a/arch/arm64/kernel/stacktrace.c
+++ b/arch/arm64/kernel/stacktrace.c
@@ -7,6 +7,7 @@
7#include <linux/kernel.h> 7#include <linux/kernel.h>
8#include <linux/export.h> 8#include <linux/export.h>
9#include <linux/ftrace.h> 9#include <linux/ftrace.h>
10#include <linux/kprobes.h>
10#include <linux/sched.h> 11#include <linux/sched.h>
11#include <linux/sched/debug.h> 12#include <linux/sched/debug.h>
12#include <linux/sched/task_stack.h> 13#include <linux/sched/task_stack.h>
@@ -29,9 +30,18 @@
29 * ldp x29, x30, [sp] 30 * ldp x29, x30, [sp]
30 * add sp, sp, #0x10 31 * add sp, sp, #0x10
31 */ 32 */
33
34/*
35 * Unwind from one frame record (A) to the next frame record (B).
36 *
37 * We terminate early if the location of B indicates a malformed chain of frame
38 * records (e.g. a cycle), determined based on the location and fp value of A
39 * and the location (but not the fp value) of B.
40 */
32int notrace unwind_frame(struct task_struct *tsk, struct stackframe *frame) 41int notrace unwind_frame(struct task_struct *tsk, struct stackframe *frame)
33{ 42{
34 unsigned long fp = frame->fp; 43 unsigned long fp = frame->fp;
44 struct stack_info info;
35 45
36 if (fp & 0xf) 46 if (fp & 0xf)
37 return -EINVAL; 47 return -EINVAL;
@@ -39,11 +49,40 @@ int notrace unwind_frame(struct task_struct *tsk, struct stackframe *frame)
39 if (!tsk) 49 if (!tsk)
40 tsk = current; 50 tsk = current;
41 51
42 if (!on_accessible_stack(tsk, fp, NULL)) 52 if (!on_accessible_stack(tsk, fp, &info))
53 return -EINVAL;
54
55 if (test_bit(info.type, frame->stacks_done))
43 return -EINVAL; 56 return -EINVAL;
44 57
58 /*
59 * As stacks grow downward, any valid record on the same stack must be
60 * at a strictly higher address than the prior record.
61 *
62 * Stacks can nest in several valid orders, e.g.
63 *
64 * TASK -> IRQ -> OVERFLOW -> SDEI_NORMAL
65 * TASK -> SDEI_NORMAL -> SDEI_CRITICAL -> OVERFLOW
66 *
67 * ... but the nesting itself is strict. Once we transition from one
68 * stack to another, it's never valid to unwind back to that first
69 * stack.
70 */
71 if (info.type == frame->prev_type) {
72 if (fp <= frame->prev_fp)
73 return -EINVAL;
74 } else {
75 set_bit(frame->prev_type, frame->stacks_done);
76 }
77
78 /*
79 * Record this frame record's values and location. The prev_fp and
80 * prev_type are only meaningful to the next unwind_frame() invocation.
81 */
45 frame->fp = READ_ONCE_NOCHECK(*(unsigned long *)(fp)); 82 frame->fp = READ_ONCE_NOCHECK(*(unsigned long *)(fp));
46 frame->pc = READ_ONCE_NOCHECK(*(unsigned long *)(fp + 8)); 83 frame->pc = READ_ONCE_NOCHECK(*(unsigned long *)(fp + 8));
84 frame->prev_fp = fp;
85 frame->prev_type = info.type;
47 86
48#ifdef CONFIG_FUNCTION_GRAPH_TRACER 87#ifdef CONFIG_FUNCTION_GRAPH_TRACER
49 if (tsk->ret_stack && 88 if (tsk->ret_stack &&
@@ -73,6 +112,7 @@ int notrace unwind_frame(struct task_struct *tsk, struct stackframe *frame)
73 112
74 return 0; 113 return 0;
75} 114}
115NOKPROBE_SYMBOL(unwind_frame);
76 116
77void notrace walk_stackframe(struct task_struct *tsk, struct stackframe *frame, 117void notrace walk_stackframe(struct task_struct *tsk, struct stackframe *frame,
78 int (*fn)(struct stackframe *, void *), void *data) 118 int (*fn)(struct stackframe *, void *), void *data)
@@ -87,6 +127,7 @@ void notrace walk_stackframe(struct task_struct *tsk, struct stackframe *frame,
87 break; 127 break;
88 } 128 }
89} 129}
130NOKPROBE_SYMBOL(walk_stackframe);
90 131
91#ifdef CONFIG_STACKTRACE 132#ifdef CONFIG_STACKTRACE
92struct stack_trace_data { 133struct stack_trace_data {
@@ -122,12 +163,7 @@ void save_stack_trace_regs(struct pt_regs *regs, struct stack_trace *trace)
122 data.skip = trace->skip; 163 data.skip = trace->skip;
123 data.no_sched_functions = 0; 164 data.no_sched_functions = 0;
124 165
125 frame.fp = regs->regs[29]; 166 start_backtrace(&frame, regs->regs[29], regs->pc);
126 frame.pc = regs->pc;
127#ifdef CONFIG_FUNCTION_GRAPH_TRACER
128 frame.graph = 0;
129#endif
130
131 walk_stackframe(current, &frame, save_trace, &data); 167 walk_stackframe(current, &frame, save_trace, &data);
132} 168}
133EXPORT_SYMBOL_GPL(save_stack_trace_regs); 169EXPORT_SYMBOL_GPL(save_stack_trace_regs);
@@ -146,17 +182,15 @@ static noinline void __save_stack_trace(struct task_struct *tsk,
146 data.no_sched_functions = nosched; 182 data.no_sched_functions = nosched;
147 183
148 if (tsk != current) { 184 if (tsk != current) {
149 frame.fp = thread_saved_fp(tsk); 185 start_backtrace(&frame, thread_saved_fp(tsk),
150 frame.pc = thread_saved_pc(tsk); 186 thread_saved_pc(tsk));
151 } else { 187 } else {
152 /* We don't want this function nor the caller */ 188 /* We don't want this function nor the caller */
153 data.skip += 2; 189 data.skip += 2;
154 frame.fp = (unsigned long)__builtin_frame_address(0); 190 start_backtrace(&frame,
155 frame.pc = (unsigned long)__save_stack_trace; 191 (unsigned long)__builtin_frame_address(0),
192 (unsigned long)__save_stack_trace);
156 } 193 }
157#ifdef CONFIG_FUNCTION_GRAPH_TRACER
158 frame.graph = 0;
159#endif
160 194
161 walk_stackframe(tsk, &frame, save_trace, &data); 195 walk_stackframe(tsk, &frame, save_trace, &data);
162 196
diff --git a/arch/arm64/kernel/time.c b/arch/arm64/kernel/time.c
index 9f25aedeac9d..0b2946414dc9 100644
--- a/arch/arm64/kernel/time.c
+++ b/arch/arm64/kernel/time.c
@@ -38,11 +38,8 @@ unsigned long profile_pc(struct pt_regs *regs)
38 if (!in_lock_functions(regs->pc)) 38 if (!in_lock_functions(regs->pc))
39 return regs->pc; 39 return regs->pc;
40 40
41 frame.fp = regs->regs[29]; 41 start_backtrace(&frame, regs->regs[29], regs->pc);
42 frame.pc = regs->pc; 42
43#ifdef CONFIG_FUNCTION_GRAPH_TRACER
44 frame.graph = 0;
45#endif
46 do { 43 do {
47 int ret = unwind_frame(NULL, &frame); 44 int ret = unwind_frame(NULL, &frame);
48 if (ret < 0) 45 if (ret < 0)
diff --git a/arch/arm64/kernel/traps.c b/arch/arm64/kernel/traps.c
index 8c03456dade6..32893b3d9164 100644
--- a/arch/arm64/kernel/traps.c
+++ b/arch/arm64/kernel/traps.c
@@ -100,18 +100,17 @@ void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk)
100 return; 100 return;
101 101
102 if (tsk == current) { 102 if (tsk == current) {
103 frame.fp = (unsigned long)__builtin_frame_address(0); 103 start_backtrace(&frame,
104 frame.pc = (unsigned long)dump_backtrace; 104 (unsigned long)__builtin_frame_address(0),
105 (unsigned long)dump_backtrace);
105 } else { 106 } else {
106 /* 107 /*
107 * task blocked in __switch_to 108 * task blocked in __switch_to
108 */ 109 */
109 frame.fp = thread_saved_fp(tsk); 110 start_backtrace(&frame,
110 frame.pc = thread_saved_pc(tsk); 111 thread_saved_fp(tsk),
112 thread_saved_pc(tsk));
111 } 113 }
112#ifdef CONFIG_FUNCTION_GRAPH_TRACER
113 frame.graph = 0;
114#endif
115 114
116 printk("Call trace:\n"); 115 printk("Call trace:\n");
117 do { 116 do {
@@ -734,6 +733,7 @@ static const char *esr_class_str[] = {
734 [ESR_ELx_EC_CP14_LS] = "CP14 LDC/STC", 733 [ESR_ELx_EC_CP14_LS] = "CP14 LDC/STC",
735 [ESR_ELx_EC_FP_ASIMD] = "ASIMD", 734 [ESR_ELx_EC_FP_ASIMD] = "ASIMD",
736 [ESR_ELx_EC_CP10_ID] = "CP10 MRC/VMRS", 735 [ESR_ELx_EC_CP10_ID] = "CP10 MRC/VMRS",
736 [ESR_ELx_EC_PAC] = "PAC",
737 [ESR_ELx_EC_CP14_64] = "CP14 MCRR/MRRC", 737 [ESR_ELx_EC_CP14_64] = "CP14 MCRR/MRRC",
738 [ESR_ELx_EC_ILL] = "PSTATE.IL", 738 [ESR_ELx_EC_ILL] = "PSTATE.IL",
739 [ESR_ELx_EC_SVC32] = "SVC (AArch32)", 739 [ESR_ELx_EC_SVC32] = "SVC (AArch32)",
diff --git a/arch/arm64/kernel/vdso/Makefile b/arch/arm64/kernel/vdso/Makefile
index 4ab863045188..dd2514bb1511 100644
--- a/arch/arm64/kernel/vdso/Makefile
+++ b/arch/arm64/kernel/vdso/Makefile
@@ -32,10 +32,10 @@ UBSAN_SANITIZE := n
32OBJECT_FILES_NON_STANDARD := y 32OBJECT_FILES_NON_STANDARD := y
33KCOV_INSTRUMENT := n 33KCOV_INSTRUMENT := n
34 34
35ifeq ($(c-gettimeofday-y),)
36CFLAGS_vgettimeofday.o = -O2 -mcmodel=tiny 35CFLAGS_vgettimeofday.o = -O2 -mcmodel=tiny
37else 36
38CFLAGS_vgettimeofday.o = -O2 -mcmodel=tiny -include $(c-gettimeofday-y) 37ifneq ($(c-gettimeofday-y),)
38 CFLAGS_vgettimeofday.o += -include $(c-gettimeofday-y)
39endif 39endif
40 40
41# Clang versions less than 8 do not support -mcmodel=tiny 41# Clang versions less than 8 do not support -mcmodel=tiny
@@ -57,8 +57,7 @@ $(obj)/vdso.o : $(obj)/vdso.so
57 57
58# Link rule for the .so file, .lds has to be first 58# Link rule for the .so file, .lds has to be first
59$(obj)/vdso.so.dbg: $(obj)/vdso.lds $(obj-vdso) FORCE 59$(obj)/vdso.so.dbg: $(obj)/vdso.lds $(obj-vdso) FORCE
60 $(call if_changed,ld) 60 $(call if_changed,vdsold_and_vdso_check)
61 $(call if_changed,vdso_check)
62 61
63# Strip rule for the .so file 62# Strip rule for the .so file
64$(obj)/%.so: OBJCOPYFLAGS := -S 63$(obj)/%.so: OBJCOPYFLAGS := -S
@@ -74,8 +73,8 @@ include/generated/vdso-offsets.h: $(obj)/vdso.so.dbg FORCE
74 $(call if_changed,vdsosym) 73 $(call if_changed,vdsosym)
75 74
76# Actual build commands 75# Actual build commands
77quiet_cmd_vdsocc = VDSOCC $@ 76quiet_cmd_vdsold_and_vdso_check = LD $@
78 cmd_vdsocc = $(CC) $(a_flags) $(c_flags) -c -o $@ $< 77 cmd_vdsold_and_vdso_check = $(cmd_ld); $(cmd_vdso_check)
79 78
80# Install commands for the unstripped file 79# Install commands for the unstripped file
81quiet_cmd_vdso_install = INSTALL $@ 80quiet_cmd_vdso_install = INSTALL $@
diff --git a/arch/arm64/kernel/vdso32/Makefile b/arch/arm64/kernel/vdso32/Makefile
index 60a4c6239712..1fba0776ed40 100644
--- a/arch/arm64/kernel/vdso32/Makefile
+++ b/arch/arm64/kernel/vdso32/Makefile
@@ -144,8 +144,7 @@ $(obj)/vdso.so.dbg: $(obj)/vdso.so.raw $(obj)/$(munge) FORCE
144 144
145# Link rule for the .so file, .lds has to be first 145# Link rule for the .so file, .lds has to be first
146$(obj)/vdso.so.raw: $(src)/vdso.lds $(obj-vdso) FORCE 146$(obj)/vdso.so.raw: $(src)/vdso.lds $(obj-vdso) FORCE
147 $(call if_changed,vdsold) 147 $(call if_changed,vdsold_and_vdso_check)
148 $(call if_changed,vdso_check)
149 148
150# Compilation rules for the vDSO sources 149# Compilation rules for the vDSO sources
151$(c-obj-vdso): %.o: %.c FORCE 150$(c-obj-vdso): %.o: %.c FORCE
@@ -156,14 +155,17 @@ $(asm-obj-vdso): %.o: %.S FORCE
156 $(call if_changed_dep,vdsoas) 155 $(call if_changed_dep,vdsoas)
157 156
158# Actual build commands 157# Actual build commands
159quiet_cmd_vdsold = VDSOL $@ 158quiet_cmd_vdsold_and_vdso_check = LD32 $@
159 cmd_vdsold_and_vdso_check = $(cmd_vdsold); $(cmd_vdso_check)
160
161quiet_cmd_vdsold = LD32 $@
160 cmd_vdsold = $(COMPATCC) -Wp,-MD,$(depfile) $(VDSO_LDFLAGS) \ 162 cmd_vdsold = $(COMPATCC) -Wp,-MD,$(depfile) $(VDSO_LDFLAGS) \
161 -Wl,-T $(filter %.lds,$^) $(filter %.o,$^) -o $@ 163 -Wl,-T $(filter %.lds,$^) $(filter %.o,$^) -o $@
162quiet_cmd_vdsocc = VDSOC $@ 164quiet_cmd_vdsocc = CC32 $@
163 cmd_vdsocc = $(COMPATCC) -Wp,-MD,$(depfile) $(VDSO_CFLAGS) -c -o $@ $< 165 cmd_vdsocc = $(COMPATCC) -Wp,-MD,$(depfile) $(VDSO_CFLAGS) -c -o $@ $<
164quiet_cmd_vdsocc_gettimeofday = VDSOC_GTD $@ 166quiet_cmd_vdsocc_gettimeofday = CC32 $@
165 cmd_vdsocc_gettimeofday = $(COMPATCC) -Wp,-MD,$(depfile) $(VDSO_CFLAGS) $(VDSO_CFLAGS_gettimeofday_o) -c -o $@ $< 167 cmd_vdsocc_gettimeofday = $(COMPATCC) -Wp,-MD,$(depfile) $(VDSO_CFLAGS) $(VDSO_CFLAGS_gettimeofday_o) -c -o $@ $<
166quiet_cmd_vdsoas = VDSOA $@ 168quiet_cmd_vdsoas = AS32 $@
167 cmd_vdsoas = $(COMPATCC) -Wp,-MD,$(depfile) $(VDSO_AFLAGS) -c -o $@ $< 169 cmd_vdsoas = $(COMPATCC) -Wp,-MD,$(depfile) $(VDSO_AFLAGS) -c -o $@ $<
168 170
169quiet_cmd_vdsomunge = MUNGE $@ 171quiet_cmd_vdsomunge = MUNGE $@
diff --git a/arch/arm64/kvm/hyp/debug-sr.c b/arch/arm64/kvm/hyp/debug-sr.c
index 26781da3ad3e..0fc9872a1467 100644
--- a/arch/arm64/kvm/hyp/debug-sr.c
+++ b/arch/arm64/kvm/hyp/debug-sr.c
@@ -18,40 +18,70 @@
18#define save_debug(ptr,reg,nr) \ 18#define save_debug(ptr,reg,nr) \
19 switch (nr) { \ 19 switch (nr) { \
20 case 15: ptr[15] = read_debug(reg, 15); \ 20 case 15: ptr[15] = read_debug(reg, 15); \
21 /* Fall through */ \
21 case 14: ptr[14] = read_debug(reg, 14); \ 22 case 14: ptr[14] = read_debug(reg, 14); \
23 /* Fall through */ \
22 case 13: ptr[13] = read_debug(reg, 13); \ 24 case 13: ptr[13] = read_debug(reg, 13); \
25 /* Fall through */ \
23 case 12: ptr[12] = read_debug(reg, 12); \ 26 case 12: ptr[12] = read_debug(reg, 12); \
27 /* Fall through */ \
24 case 11: ptr[11] = read_debug(reg, 11); \ 28 case 11: ptr[11] = read_debug(reg, 11); \
29 /* Fall through */ \
25 case 10: ptr[10] = read_debug(reg, 10); \ 30 case 10: ptr[10] = read_debug(reg, 10); \
31 /* Fall through */ \
26 case 9: ptr[9] = read_debug(reg, 9); \ 32 case 9: ptr[9] = read_debug(reg, 9); \
33 /* Fall through */ \
27 case 8: ptr[8] = read_debug(reg, 8); \ 34 case 8: ptr[8] = read_debug(reg, 8); \
35 /* Fall through */ \
28 case 7: ptr[7] = read_debug(reg, 7); \ 36 case 7: ptr[7] = read_debug(reg, 7); \
37 /* Fall through */ \
29 case 6: ptr[6] = read_debug(reg, 6); \ 38 case 6: ptr[6] = read_debug(reg, 6); \
39 /* Fall through */ \
30 case 5: ptr[5] = read_debug(reg, 5); \ 40 case 5: ptr[5] = read_debug(reg, 5); \
41 /* Fall through */ \
31 case 4: ptr[4] = read_debug(reg, 4); \ 42 case 4: ptr[4] = read_debug(reg, 4); \
43 /* Fall through */ \
32 case 3: ptr[3] = read_debug(reg, 3); \ 44 case 3: ptr[3] = read_debug(reg, 3); \
45 /* Fall through */ \
33 case 2: ptr[2] = read_debug(reg, 2); \ 46 case 2: ptr[2] = read_debug(reg, 2); \
47 /* Fall through */ \
34 case 1: ptr[1] = read_debug(reg, 1); \ 48 case 1: ptr[1] = read_debug(reg, 1); \
49 /* Fall through */ \
35 default: ptr[0] = read_debug(reg, 0); \ 50 default: ptr[0] = read_debug(reg, 0); \
36 } 51 }
37 52
38#define restore_debug(ptr,reg,nr) \ 53#define restore_debug(ptr,reg,nr) \
39 switch (nr) { \ 54 switch (nr) { \
40 case 15: write_debug(ptr[15], reg, 15); \ 55 case 15: write_debug(ptr[15], reg, 15); \
56 /* Fall through */ \
41 case 14: write_debug(ptr[14], reg, 14); \ 57 case 14: write_debug(ptr[14], reg, 14); \
58 /* Fall through */ \
42 case 13: write_debug(ptr[13], reg, 13); \ 59 case 13: write_debug(ptr[13], reg, 13); \
60 /* Fall through */ \
43 case 12: write_debug(ptr[12], reg, 12); \ 61 case 12: write_debug(ptr[12], reg, 12); \
62 /* Fall through */ \
44 case 11: write_debug(ptr[11], reg, 11); \ 63 case 11: write_debug(ptr[11], reg, 11); \
64 /* Fall through */ \
45 case 10: write_debug(ptr[10], reg, 10); \ 65 case 10: write_debug(ptr[10], reg, 10); \
66 /* Fall through */ \
46 case 9: write_debug(ptr[9], reg, 9); \ 67 case 9: write_debug(ptr[9], reg, 9); \
68 /* Fall through */ \
47 case 8: write_debug(ptr[8], reg, 8); \ 69 case 8: write_debug(ptr[8], reg, 8); \
70 /* Fall through */ \
48 case 7: write_debug(ptr[7], reg, 7); \ 71 case 7: write_debug(ptr[7], reg, 7); \
72 /* Fall through */ \
49 case 6: write_debug(ptr[6], reg, 6); \ 73 case 6: write_debug(ptr[6], reg, 6); \
74 /* Fall through */ \
50 case 5: write_debug(ptr[5], reg, 5); \ 75 case 5: write_debug(ptr[5], reg, 5); \
76 /* Fall through */ \
51 case 4: write_debug(ptr[4], reg, 4); \ 77 case 4: write_debug(ptr[4], reg, 4); \
78 /* Fall through */ \
52 case 3: write_debug(ptr[3], reg, 3); \ 79 case 3: write_debug(ptr[3], reg, 3); \
80 /* Fall through */ \
53 case 2: write_debug(ptr[2], reg, 2); \ 81 case 2: write_debug(ptr[2], reg, 2); \
82 /* Fall through */ \
54 case 1: write_debug(ptr[1], reg, 1); \ 83 case 1: write_debug(ptr[1], reg, 1); \
84 /* Fall through */ \
55 default: write_debug(ptr[0], reg, 0); \ 85 default: write_debug(ptr[0], reg, 0); \
56 } 86 }
57 87
diff --git a/arch/arm64/kvm/regmap.c b/arch/arm64/kvm/regmap.c
index 0d60e4f0af66..a900181e3867 100644
--- a/arch/arm64/kvm/regmap.c
+++ b/arch/arm64/kvm/regmap.c
@@ -178,13 +178,18 @@ void vcpu_write_spsr32(struct kvm_vcpu *vcpu, unsigned long v)
178 switch (spsr_idx) { 178 switch (spsr_idx) {
179 case KVM_SPSR_SVC: 179 case KVM_SPSR_SVC:
180 write_sysreg_el1(v, SYS_SPSR); 180 write_sysreg_el1(v, SYS_SPSR);
181 break;
181 case KVM_SPSR_ABT: 182 case KVM_SPSR_ABT:
182 write_sysreg(v, spsr_abt); 183 write_sysreg(v, spsr_abt);
184 break;
183 case KVM_SPSR_UND: 185 case KVM_SPSR_UND:
184 write_sysreg(v, spsr_und); 186 write_sysreg(v, spsr_und);
187 break;
185 case KVM_SPSR_IRQ: 188 case KVM_SPSR_IRQ:
186 write_sysreg(v, spsr_irq); 189 write_sysreg(v, spsr_irq);
190 break;
187 case KVM_SPSR_FIQ: 191 case KVM_SPSR_FIQ:
188 write_sysreg(v, spsr_fiq); 192 write_sysreg(v, spsr_fiq);
193 break;
189 } 194 }
190} 195}
diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c
index f26e181d881c..2071260a275b 100644
--- a/arch/arm64/kvm/sys_regs.c
+++ b/arch/arm64/kvm/sys_regs.c
@@ -632,7 +632,7 @@ static void reset_pmcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
632 */ 632 */
633 val = ((pmcr & ~ARMV8_PMU_PMCR_MASK) 633 val = ((pmcr & ~ARMV8_PMU_PMCR_MASK)
634 | (ARMV8_PMU_PMCR_MASK & 0xdecafbad)) & (~ARMV8_PMU_PMCR_E); 634 | (ARMV8_PMU_PMCR_MASK & 0xdecafbad)) & (~ARMV8_PMU_PMCR_E);
635 __vcpu_sys_reg(vcpu, PMCR_EL0) = val; 635 __vcpu_sys_reg(vcpu, r->reg) = val;
636} 636}
637 637
638static bool check_pmu_access_disabled(struct kvm_vcpu *vcpu, u64 flags) 638static bool check_pmu_access_disabled(struct kvm_vcpu *vcpu, u64 flags)
@@ -981,13 +981,13 @@ static bool access_pmuserenr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
981/* Silly macro to expand the DBG{BCR,BVR,WVR,WCR}n_EL1 registers in one go */ 981/* Silly macro to expand the DBG{BCR,BVR,WVR,WCR}n_EL1 registers in one go */
982#define DBG_BCR_BVR_WCR_WVR_EL1(n) \ 982#define DBG_BCR_BVR_WCR_WVR_EL1(n) \
983 { SYS_DESC(SYS_DBGBVRn_EL1(n)), \ 983 { SYS_DESC(SYS_DBGBVRn_EL1(n)), \
984 trap_bvr, reset_bvr, n, 0, get_bvr, set_bvr }, \ 984 trap_bvr, reset_bvr, 0, 0, get_bvr, set_bvr }, \
985 { SYS_DESC(SYS_DBGBCRn_EL1(n)), \ 985 { SYS_DESC(SYS_DBGBCRn_EL1(n)), \
986 trap_bcr, reset_bcr, n, 0, get_bcr, set_bcr }, \ 986 trap_bcr, reset_bcr, 0, 0, get_bcr, set_bcr }, \
987 { SYS_DESC(SYS_DBGWVRn_EL1(n)), \ 987 { SYS_DESC(SYS_DBGWVRn_EL1(n)), \
988 trap_wvr, reset_wvr, n, 0, get_wvr, set_wvr }, \ 988 trap_wvr, reset_wvr, 0, 0, get_wvr, set_wvr }, \
989 { SYS_DESC(SYS_DBGWCRn_EL1(n)), \ 989 { SYS_DESC(SYS_DBGWCRn_EL1(n)), \
990 trap_wcr, reset_wcr, n, 0, get_wcr, set_wcr } 990 trap_wcr, reset_wcr, 0, 0, get_wcr, set_wcr }
991 991
992/* Macro to expand the PMEVCNTRn_EL0 register */ 992/* Macro to expand the PMEVCNTRn_EL0 register */
993#define PMU_PMEVCNTR_EL0(n) \ 993#define PMU_PMEVCNTR_EL0(n) \
@@ -1540,7 +1540,7 @@ static const struct sys_reg_desc sys_reg_descs[] = {
1540 { SYS_DESC(SYS_CSSELR_EL1), access_csselr, reset_unknown, CSSELR_EL1 }, 1540 { SYS_DESC(SYS_CSSELR_EL1), access_csselr, reset_unknown, CSSELR_EL1 },
1541 { SYS_DESC(SYS_CTR_EL0), access_ctr }, 1541 { SYS_DESC(SYS_CTR_EL0), access_ctr },
1542 1542
1543 { SYS_DESC(SYS_PMCR_EL0), access_pmcr, reset_pmcr, }, 1543 { SYS_DESC(SYS_PMCR_EL0), access_pmcr, reset_pmcr, PMCR_EL0 },
1544 { SYS_DESC(SYS_PMCNTENSET_EL0), access_pmcnten, reset_unknown, PMCNTENSET_EL0 }, 1544 { SYS_DESC(SYS_PMCNTENSET_EL0), access_pmcnten, reset_unknown, PMCNTENSET_EL0 },
1545 { SYS_DESC(SYS_PMCNTENCLR_EL0), access_pmcnten, NULL, PMCNTENSET_EL0 }, 1545 { SYS_DESC(SYS_PMCNTENCLR_EL0), access_pmcnten, NULL, PMCNTENSET_EL0 },
1546 { SYS_DESC(SYS_PMOVSCLR_EL0), access_pmovs, NULL, PMOVSSET_EL0 }, 1546 { SYS_DESC(SYS_PMOVSCLR_EL0), access_pmovs, NULL, PMOVSSET_EL0 },
@@ -2254,13 +2254,19 @@ static int emulate_sys_reg(struct kvm_vcpu *vcpu,
2254} 2254}
2255 2255
2256static void reset_sys_reg_descs(struct kvm_vcpu *vcpu, 2256static void reset_sys_reg_descs(struct kvm_vcpu *vcpu,
2257 const struct sys_reg_desc *table, size_t num) 2257 const struct sys_reg_desc *table, size_t num,
2258 unsigned long *bmap)
2258{ 2259{
2259 unsigned long i; 2260 unsigned long i;
2260 2261
2261 for (i = 0; i < num; i++) 2262 for (i = 0; i < num; i++)
2262 if (table[i].reset) 2263 if (table[i].reset) {
2264 int reg = table[i].reg;
2265
2263 table[i].reset(vcpu, &table[i]); 2266 table[i].reset(vcpu, &table[i]);
2267 if (reg > 0 && reg < NR_SYS_REGS)
2268 set_bit(reg, bmap);
2269 }
2264} 2270}
2265 2271
2266/** 2272/**
@@ -2774,18 +2780,16 @@ void kvm_reset_sys_regs(struct kvm_vcpu *vcpu)
2774{ 2780{
2775 size_t num; 2781 size_t num;
2776 const struct sys_reg_desc *table; 2782 const struct sys_reg_desc *table;
2777 2783 DECLARE_BITMAP(bmap, NR_SYS_REGS) = { 0, };
2778 /* Catch someone adding a register without putting in reset entry. */
2779 memset(&vcpu->arch.ctxt.sys_regs, 0x42, sizeof(vcpu->arch.ctxt.sys_regs));
2780 2784
2781 /* Generic chip reset first (so target could override). */ 2785 /* Generic chip reset first (so target could override). */
2782 reset_sys_reg_descs(vcpu, sys_reg_descs, ARRAY_SIZE(sys_reg_descs)); 2786 reset_sys_reg_descs(vcpu, sys_reg_descs, ARRAY_SIZE(sys_reg_descs), bmap);
2783 2787
2784 table = get_target_table(vcpu->arch.target, true, &num); 2788 table = get_target_table(vcpu->arch.target, true, &num);
2785 reset_sys_reg_descs(vcpu, table, num); 2789 reset_sys_reg_descs(vcpu, table, num, bmap);
2786 2790
2787 for (num = 1; num < NR_SYS_REGS; num++) { 2791 for (num = 1; num < NR_SYS_REGS; num++) {
2788 if (WARN(__vcpu_sys_reg(vcpu, num) == 0x4242424242424242, 2792 if (WARN(!test_bit(num, bmap),
2789 "Didn't reset __vcpu_sys_reg(%zi)\n", num)) 2793 "Didn't reset __vcpu_sys_reg(%zi)\n", num))
2790 break; 2794 break;
2791 } 2795 }
diff --git a/arch/arm64/mm/dma-mapping.c b/arch/arm64/mm/dma-mapping.c
index 1d3f0b5a9940..bd2b039f43a6 100644
--- a/arch/arm64/mm/dma-mapping.c
+++ b/arch/arm64/mm/dma-mapping.c
@@ -14,9 +14,7 @@
14pgprot_t arch_dma_mmap_pgprot(struct device *dev, pgprot_t prot, 14pgprot_t arch_dma_mmap_pgprot(struct device *dev, pgprot_t prot,
15 unsigned long attrs) 15 unsigned long attrs)
16{ 16{
17 if (!dev_is_dma_coherent(dev) || (attrs & DMA_ATTR_WRITE_COMBINE)) 17 return pgprot_writecombine(prot);
18 return pgprot_writecombine(prot);
19 return prot;
20} 18}
21 19
22void arch_sync_dma_for_device(struct device *dev, phys_addr_t paddr, 20void arch_sync_dma_for_device(struct device *dev, phys_addr_t paddr,
diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c
index 9568c116ac7f..cfd65b63f36f 100644
--- a/arch/arm64/mm/fault.c
+++ b/arch/arm64/mm/fault.c
@@ -777,6 +777,53 @@ void __init hook_debug_fault_code(int nr,
777 debug_fault_info[nr].name = name; 777 debug_fault_info[nr].name = name;
778} 778}
779 779
780/*
781 * In debug exception context, we explicitly disable preemption despite
782 * having interrupts disabled.
783 * This serves two purposes: it makes it much less likely that we would
784 * accidentally schedule in exception context and it will force a warning
785 * if we somehow manage to schedule by accident.
786 */
787static void debug_exception_enter(struct pt_regs *regs)
788{
789 /*
790 * Tell lockdep we disabled irqs in entry.S. Do nothing if they were
791 * already disabled to preserve the last enabled/disabled addresses.
792 */
793 if (interrupts_enabled(regs))
794 trace_hardirqs_off();
795
796 if (user_mode(regs)) {
797 RCU_LOCKDEP_WARN(!rcu_is_watching(), "entry code didn't wake RCU");
798 } else {
799 /*
800 * We might have interrupted pretty much anything. In
801 * fact, if we're a debug exception, we can even interrupt
802 * NMI processing. We don't want this code makes in_nmi()
803 * to return true, but we need to notify RCU.
804 */
805 rcu_nmi_enter();
806 }
807
808 preempt_disable();
809
810 /* This code is a bit fragile. Test it. */
811 RCU_LOCKDEP_WARN(!rcu_is_watching(), "exception_enter didn't work");
812}
813NOKPROBE_SYMBOL(debug_exception_enter);
814
815static void debug_exception_exit(struct pt_regs *regs)
816{
817 preempt_enable_no_resched();
818
819 if (!user_mode(regs))
820 rcu_nmi_exit();
821
822 if (interrupts_enabled(regs))
823 trace_hardirqs_on();
824}
825NOKPROBE_SYMBOL(debug_exception_exit);
826
780#ifdef CONFIG_ARM64_ERRATUM_1463225 827#ifdef CONFIG_ARM64_ERRATUM_1463225
781DECLARE_PER_CPU(int, __in_cortex_a76_erratum_1463225_wa); 828DECLARE_PER_CPU(int, __in_cortex_a76_erratum_1463225_wa);
782 829
@@ -817,12 +864,7 @@ asmlinkage void __exception do_debug_exception(unsigned long addr_if_watchpoint,
817 if (cortex_a76_erratum_1463225_debug_handler(regs)) 864 if (cortex_a76_erratum_1463225_debug_handler(regs))
818 return; 865 return;
819 866
820 /* 867 debug_exception_enter(regs);
821 * Tell lockdep we disabled irqs in entry.S. Do nothing if they were
822 * already disabled to preserve the last enabled/disabled addresses.
823 */
824 if (interrupts_enabled(regs))
825 trace_hardirqs_off();
826 868
827 if (user_mode(regs) && !is_ttbr0_addr(pc)) 869 if (user_mode(regs) && !is_ttbr0_addr(pc))
828 arm64_apply_bp_hardening(); 870 arm64_apply_bp_hardening();
@@ -832,7 +874,6 @@ asmlinkage void __exception do_debug_exception(unsigned long addr_if_watchpoint,
832 inf->sig, inf->code, (void __user *)pc, esr); 874 inf->sig, inf->code, (void __user *)pc, esr);
833 } 875 }
834 876
835 if (interrupts_enabled(regs)) 877 debug_exception_exit(regs);
836 trace_hardirqs_on();
837} 878}
838NOKPROBE_SYMBOL(do_debug_exception); 879NOKPROBE_SYMBOL(do_debug_exception);
diff --git a/arch/csky/include/uapi/asm/byteorder.h b/arch/csky/include/uapi/asm/byteorder.h
index b079ec715cdf..d150cd664873 100644
--- a/arch/csky/include/uapi/asm/byteorder.h
+++ b/arch/csky/include/uapi/asm/byteorder.h
@@ -1,4 +1,4 @@
1/* SPDX-License-Identifier: GPL-2.0 */ 1/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
2// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. 2// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.
3 3
4#ifndef __ASM_CSKY_BYTEORDER_H 4#ifndef __ASM_CSKY_BYTEORDER_H
diff --git a/arch/csky/include/uapi/asm/cachectl.h b/arch/csky/include/uapi/asm/cachectl.h
index ddf2f39aa925..ed7fad1ea20d 100644
--- a/arch/csky/include/uapi/asm/cachectl.h
+++ b/arch/csky/include/uapi/asm/cachectl.h
@@ -1,4 +1,4 @@
1/* SPDX-License-Identifier: GPL-2.0 */ 1/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
2 2
3#ifndef __ASM_CSKY_CACHECTL_H 3#ifndef __ASM_CSKY_CACHECTL_H
4#define __ASM_CSKY_CACHECTL_H 4#define __ASM_CSKY_CACHECTL_H
diff --git a/arch/csky/include/uapi/asm/perf_regs.h b/arch/csky/include/uapi/asm/perf_regs.h
index ee323d818592..49d4e147a559 100644
--- a/arch/csky/include/uapi/asm/perf_regs.h
+++ b/arch/csky/include/uapi/asm/perf_regs.h
@@ -1,4 +1,4 @@
1/* SPDX-License-Identifier: GPL-2.0 */ 1/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
2// Copyright (C) 2019 Hangzhou C-SKY Microsystems co.,ltd. 2// Copyright (C) 2019 Hangzhou C-SKY Microsystems co.,ltd.
3 3
4#ifndef _ASM_CSKY_PERF_REGS_H 4#ifndef _ASM_CSKY_PERF_REGS_H
diff --git a/arch/csky/include/uapi/asm/ptrace.h b/arch/csky/include/uapi/asm/ptrace.h
index 4e248d5b86ef..66b2268e324e 100644
--- a/arch/csky/include/uapi/asm/ptrace.h
+++ b/arch/csky/include/uapi/asm/ptrace.h
@@ -1,4 +1,4 @@
1/* SPDX-License-Identifier: GPL-2.0 */ 1/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
2// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. 2// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.
3 3
4#ifndef _CSKY_PTRACE_H 4#ifndef _CSKY_PTRACE_H
diff --git a/arch/csky/include/uapi/asm/sigcontext.h b/arch/csky/include/uapi/asm/sigcontext.h
index e81e7ff11e36..670c020f2cb8 100644
--- a/arch/csky/include/uapi/asm/sigcontext.h
+++ b/arch/csky/include/uapi/asm/sigcontext.h
@@ -1,4 +1,4 @@
1/* SPDX-License-Identifier: GPL-2.0 */ 1/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
2// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. 2// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.
3 3
4#ifndef __ASM_CSKY_SIGCONTEXT_H 4#ifndef __ASM_CSKY_SIGCONTEXT_H
diff --git a/arch/csky/include/uapi/asm/unistd.h b/arch/csky/include/uapi/asm/unistd.h
index ec60e49cea66..211c983c7282 100644
--- a/arch/csky/include/uapi/asm/unistd.h
+++ b/arch/csky/include/uapi/asm/unistd.h
@@ -1,4 +1,4 @@
1/* SPDX-License-Identifier: GPL-2.0 */ 1/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
2// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. 2// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.
3 3
4#define __ARCH_WANT_SYS_CLONE 4#define __ARCH_WANT_SYS_CLONE
diff --git a/arch/mips/cavium-octeon/octeon-usb.c b/arch/mips/cavium-octeon/octeon-usb.c
index 1f730ded5224..cc88a08bc1f7 100644
--- a/arch/mips/cavium-octeon/octeon-usb.c
+++ b/arch/mips/cavium-octeon/octeon-usb.c
@@ -398,6 +398,7 @@ static int dwc3_octeon_clocks_start(struct device *dev, u64 base)
398 default: 398 default:
399 dev_err(dev, "Invalid ref_clk %u, using 100000000 instead\n", 399 dev_err(dev, "Invalid ref_clk %u, using 100000000 instead\n",
400 clock_rate); 400 clock_rate);
401 /* fall through */
401 case 100000000: 402 case 100000000:
402 mpll_mul = 0x19; 403 mpll_mul = 0x19;
403 if (ref_clk_sel < 2) 404 if (ref_clk_sel < 2)
diff --git a/arch/mips/include/asm/octeon/cvmx-sli-defs.h b/arch/mips/include/asm/octeon/cvmx-sli-defs.h
index 52cf96ea43e5..cbc7cdae1c6a 100644
--- a/arch/mips/include/asm/octeon/cvmx-sli-defs.h
+++ b/arch/mips/include/asm/octeon/cvmx-sli-defs.h
@@ -46,6 +46,7 @@ static inline uint64_t CVMX_SLI_PCIE_MSI_RCV_FUNC(void)
46 case OCTEON_CN78XX & OCTEON_FAMILY_MASK: 46 case OCTEON_CN78XX & OCTEON_FAMILY_MASK:
47 if (OCTEON_IS_MODEL(OCTEON_CN78XX_PASS1_X)) 47 if (OCTEON_IS_MODEL(OCTEON_CN78XX_PASS1_X))
48 return 0x0000000000003CB0ull; 48 return 0x0000000000003CB0ull;
49 /* Else, fall through */
49 default: 50 default:
50 return 0x0000000000023CB0ull; 51 return 0x0000000000023CB0ull;
51 } 52 }
diff --git a/arch/mips/kernel/cacheinfo.c b/arch/mips/kernel/cacheinfo.c
index e0dd66881da6..f777e44653d5 100644
--- a/arch/mips/kernel/cacheinfo.c
+++ b/arch/mips/kernel/cacheinfo.c
@@ -69,6 +69,8 @@ static int __populate_cache_leaves(unsigned int cpu)
69 if (c->tcache.waysize) 69 if (c->tcache.waysize)
70 populate_cache(tcache, this_leaf, 3, CACHE_TYPE_UNIFIED); 70 populate_cache(tcache, this_leaf, 3, CACHE_TYPE_UNIFIED);
71 71
72 this_cpu_ci->cpu_map_populated = true;
73
72 return 0; 74 return 0;
73} 75}
74 76
diff --git a/arch/mips/kernel/i8253.c b/arch/mips/kernel/i8253.c
index 5f209f111e59..df7ddd246eaa 100644
--- a/arch/mips/kernel/i8253.c
+++ b/arch/mips/kernel/i8253.c
@@ -32,7 +32,8 @@ void __init setup_pit_timer(void)
32 32
33static int __init init_pit_clocksource(void) 33static int __init init_pit_clocksource(void)
34{ 34{
35 if (num_possible_cpus() > 1) /* PIT does not scale! */ 35 if (num_possible_cpus() > 1 || /* PIT does not scale! */
36 !clockevent_state_periodic(&i8253_clockevent))
36 return 0; 37 return 0;
37 38
38 return clocksource_i8253_init(); 39 return clocksource_i8253_init();
diff --git a/arch/mips/kvm/emulate.c b/arch/mips/kvm/emulate.c
index e5de6bac8197..754094b40a75 100644
--- a/arch/mips/kvm/emulate.c
+++ b/arch/mips/kvm/emulate.c
@@ -140,6 +140,7 @@ static int kvm_compute_return_epc(struct kvm_vcpu *vcpu, unsigned long instpc,
140 /* These are unconditional and in j_format. */ 140 /* These are unconditional and in j_format. */
141 case jal_op: 141 case jal_op:
142 arch->gprs[31] = instpc + 8; 142 arch->gprs[31] = instpc + 8;
143 /* fall through */
143 case j_op: 144 case j_op:
144 epc += 4; 145 epc += 4;
145 epc >>= 28; 146 epc >>= 28;
diff --git a/arch/mips/kvm/mips.c b/arch/mips/kvm/mips.c
index 2cfe839f0b3a..1109924560d8 100644
--- a/arch/mips/kvm/mips.c
+++ b/arch/mips/kvm/mips.c
@@ -150,16 +150,6 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
150 return 0; 150 return 0;
151} 151}
152 152
153bool kvm_arch_has_vcpu_debugfs(void)
154{
155 return false;
156}
157
158int kvm_arch_create_vcpu_debugfs(struct kvm_vcpu *vcpu)
159{
160 return 0;
161}
162
163void kvm_mips_free_vcpus(struct kvm *kvm) 153void kvm_mips_free_vcpus(struct kvm *kvm)
164{ 154{
165 unsigned int i; 155 unsigned int i;
diff --git a/arch/mips/oprofile/op_model_mipsxx.c b/arch/mips/oprofile/op_model_mipsxx.c
index 7c04b17f4a48..96c13a0ab078 100644
--- a/arch/mips/oprofile/op_model_mipsxx.c
+++ b/arch/mips/oprofile/op_model_mipsxx.c
@@ -172,12 +172,15 @@ static void mipsxx_cpu_setup(void *args)
172 case 4: 172 case 4:
173 w_c0_perfctrl3(0); 173 w_c0_perfctrl3(0);
174 w_c0_perfcntr3(reg.counter[3]); 174 w_c0_perfcntr3(reg.counter[3]);
175 /* fall through */
175 case 3: 176 case 3:
176 w_c0_perfctrl2(0); 177 w_c0_perfctrl2(0);
177 w_c0_perfcntr2(reg.counter[2]); 178 w_c0_perfcntr2(reg.counter[2]);
179 /* fall through */
178 case 2: 180 case 2:
179 w_c0_perfctrl1(0); 181 w_c0_perfctrl1(0);
180 w_c0_perfcntr1(reg.counter[1]); 182 w_c0_perfcntr1(reg.counter[1]);
183 /* fall through */
181 case 1: 184 case 1:
182 w_c0_perfctrl0(0); 185 w_c0_perfctrl0(0);
183 w_c0_perfcntr0(reg.counter[0]); 186 w_c0_perfcntr0(reg.counter[0]);
@@ -195,10 +198,13 @@ static void mipsxx_cpu_start(void *args)
195 switch (counters) { 198 switch (counters) {
196 case 4: 199 case 4:
197 w_c0_perfctrl3(WHAT | reg.control[3]); 200 w_c0_perfctrl3(WHAT | reg.control[3]);
201 /* fall through */
198 case 3: 202 case 3:
199 w_c0_perfctrl2(WHAT | reg.control[2]); 203 w_c0_perfctrl2(WHAT | reg.control[2]);
204 /* fall through */
200 case 2: 205 case 2:
201 w_c0_perfctrl1(WHAT | reg.control[1]); 206 w_c0_perfctrl1(WHAT | reg.control[1]);
207 /* fall through */
202 case 1: 208 case 1:
203 w_c0_perfctrl0(WHAT | reg.control[0]); 209 w_c0_perfctrl0(WHAT | reg.control[0]);
204 } 210 }
@@ -215,10 +221,13 @@ static void mipsxx_cpu_stop(void *args)
215 switch (counters) { 221 switch (counters) {
216 case 4: 222 case 4:
217 w_c0_perfctrl3(0); 223 w_c0_perfctrl3(0);
224 /* fall through */
218 case 3: 225 case 3:
219 w_c0_perfctrl2(0); 226 w_c0_perfctrl2(0);
227 /* fall through */
220 case 2: 228 case 2:
221 w_c0_perfctrl1(0); 229 w_c0_perfctrl1(0);
230 /* fall through */
222 case 1: 231 case 1:
223 w_c0_perfctrl0(0); 232 w_c0_perfctrl0(0);
224 } 233 }
@@ -236,6 +245,7 @@ static int mipsxx_perfcount_handler(void)
236 245
237 switch (counters) { 246 switch (counters) {
238#define HANDLE_COUNTER(n) \ 247#define HANDLE_COUNTER(n) \
248 /* fall through */ \
239 case n + 1: \ 249 case n + 1: \
240 control = r_c0_perfctrl ## n(); \ 250 control = r_c0_perfctrl ## n(); \
241 counter = r_c0_perfcntr ## n(); \ 251 counter = r_c0_perfcntr ## n(); \
@@ -297,12 +307,15 @@ static void reset_counters(void *arg)
297 case 4: 307 case 4:
298 w_c0_perfctrl3(0); 308 w_c0_perfctrl3(0);
299 w_c0_perfcntr3(0); 309 w_c0_perfcntr3(0);
310 /* fall through */
300 case 3: 311 case 3:
301 w_c0_perfctrl2(0); 312 w_c0_perfctrl2(0);
302 w_c0_perfcntr2(0); 313 w_c0_perfcntr2(0);
314 /* fall through */
303 case 2: 315 case 2:
304 w_c0_perfctrl1(0); 316 w_c0_perfctrl1(0);
305 w_c0_perfcntr1(0); 317 w_c0_perfcntr1(0);
318 /* fall through */
306 case 1: 319 case 1:
307 w_c0_perfctrl0(0); 320 w_c0_perfctrl0(0);
308 w_c0_perfcntr0(0); 321 w_c0_perfcntr0(0);
diff --git a/arch/mips/pci/ops-bcm63xx.c b/arch/mips/pci/ops-bcm63xx.c
index d02eb9d16b55..925c72348fb6 100644
--- a/arch/mips/pci/ops-bcm63xx.c
+++ b/arch/mips/pci/ops-bcm63xx.c
@@ -474,6 +474,7 @@ static int bcm63xx_pcie_can_access(struct pci_bus *bus, int devfn)
474 if (PCI_SLOT(devfn) == 0) 474 if (PCI_SLOT(devfn) == 0)
475 return bcm_pcie_readl(PCIE_DLSTATUS_REG) 475 return bcm_pcie_readl(PCIE_DLSTATUS_REG)
476 & DLSTATUS_PHYLINKUP; 476 & DLSTATUS_PHYLINKUP;
477 /* else, fall through */
477 default: 478 default:
478 return false; 479 return false;
479 } 480 }
diff --git a/arch/mips/vdso/vdso.h b/arch/mips/vdso/vdso.h
index 14b1931be69c..b65b169778e3 100644
--- a/arch/mips/vdso/vdso.h
+++ b/arch/mips/vdso/vdso.h
@@ -9,6 +9,7 @@
9#if _MIPS_SIM != _MIPS_SIM_ABI64 && defined(CONFIG_64BIT) 9#if _MIPS_SIM != _MIPS_SIM_ABI64 && defined(CONFIG_64BIT)
10 10
11/* Building 32-bit VDSO for the 64-bit kernel. Fake a 32-bit Kconfig. */ 11/* Building 32-bit VDSO for the 64-bit kernel. Fake a 32-bit Kconfig. */
12#define BUILD_VDSO32_64
12#undef CONFIG_64BIT 13#undef CONFIG_64BIT
13#define CONFIG_32BIT 1 14#define CONFIG_32BIT 1
14#ifndef __ASSEMBLY__ 15#ifndef __ASSEMBLY__
diff --git a/arch/nds32/include/uapi/asm/auxvec.h b/arch/nds32/include/uapi/asm/auxvec.h
index b5d58ea8decb..bc0b92ab8c15 100644
--- a/arch/nds32/include/uapi/asm/auxvec.h
+++ b/arch/nds32/include/uapi/asm/auxvec.h
@@ -1,4 +1,4 @@
1/* SPDX-License-Identifier: GPL-2.0 */ 1/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
2// Copyright (C) 2005-2017 Andes Technology Corporation 2// Copyright (C) 2005-2017 Andes Technology Corporation
3 3
4#ifndef __ASM_AUXVEC_H 4#ifndef __ASM_AUXVEC_H
diff --git a/arch/nds32/include/uapi/asm/byteorder.h b/arch/nds32/include/uapi/asm/byteorder.h
index 511e653c709d..c264ef12c49c 100644
--- a/arch/nds32/include/uapi/asm/byteorder.h
+++ b/arch/nds32/include/uapi/asm/byteorder.h
@@ -1,4 +1,4 @@
1/* SPDX-License-Identifier: GPL-2.0 */ 1/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
2// Copyright (C) 2005-2017 Andes Technology Corporation 2// Copyright (C) 2005-2017 Andes Technology Corporation
3 3
4#ifndef __NDS32_BYTEORDER_H__ 4#ifndef __NDS32_BYTEORDER_H__
diff --git a/arch/nds32/include/uapi/asm/cachectl.h b/arch/nds32/include/uapi/asm/cachectl.h
index 73793662815c..31b9b439d819 100644
--- a/arch/nds32/include/uapi/asm/cachectl.h
+++ b/arch/nds32/include/uapi/asm/cachectl.h
@@ -1,4 +1,4 @@
1/* SPDX-License-Identifier: GPL-2.0 */ 1/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
2// Copyright (C) 1994, 1995, 1996 by Ralf Baechle 2// Copyright (C) 1994, 1995, 1996 by Ralf Baechle
3// Copyright (C) 2005-2017 Andes Technology Corporation 3// Copyright (C) 2005-2017 Andes Technology Corporation
4#ifndef _ASM_CACHECTL 4#ifndef _ASM_CACHECTL
diff --git a/arch/nds32/include/uapi/asm/fp_udfiex_crtl.h b/arch/nds32/include/uapi/asm/fp_udfiex_crtl.h
index d54a5d6c6538..f17396db16ec 100644
--- a/arch/nds32/include/uapi/asm/fp_udfiex_crtl.h
+++ b/arch/nds32/include/uapi/asm/fp_udfiex_crtl.h
@@ -1,4 +1,4 @@
1/* SPDX-License-Identifier: GPL-2.0 */ 1/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
2/* Copyright (C) 2005-2019 Andes Technology Corporation */ 2/* Copyright (C) 2005-2019 Andes Technology Corporation */
3#ifndef _FP_UDF_IEX_CRTL_H 3#ifndef _FP_UDF_IEX_CRTL_H
4#define _FP_UDF_IEX_CRTL_H 4#define _FP_UDF_IEX_CRTL_H
diff --git a/arch/nds32/include/uapi/asm/param.h b/arch/nds32/include/uapi/asm/param.h
index 2977534a6bd3..48d00328d328 100644
--- a/arch/nds32/include/uapi/asm/param.h
+++ b/arch/nds32/include/uapi/asm/param.h
@@ -1,4 +1,4 @@
1/* SPDX-License-Identifier: GPL-2.0 */ 1/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
2// Copyright (C) 2005-2017 Andes Technology Corporation 2// Copyright (C) 2005-2017 Andes Technology Corporation
3 3
4#ifndef __ASM_NDS32_PARAM_H 4#ifndef __ASM_NDS32_PARAM_H
diff --git a/arch/nds32/include/uapi/asm/ptrace.h b/arch/nds32/include/uapi/asm/ptrace.h
index 1a6e01c00e6f..d76217c7c010 100644
--- a/arch/nds32/include/uapi/asm/ptrace.h
+++ b/arch/nds32/include/uapi/asm/ptrace.h
@@ -1,4 +1,4 @@
1/* SPDX-License-Identifier: GPL-2.0 */ 1/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
2// Copyright (C) 2005-2017 Andes Technology Corporation 2// Copyright (C) 2005-2017 Andes Technology Corporation
3 3
4#ifndef __UAPI_ASM_NDS32_PTRACE_H 4#ifndef __UAPI_ASM_NDS32_PTRACE_H
diff --git a/arch/nds32/include/uapi/asm/sigcontext.h b/arch/nds32/include/uapi/asm/sigcontext.h
index dc89af7ddcc3..6c1e6648878f 100644
--- a/arch/nds32/include/uapi/asm/sigcontext.h
+++ b/arch/nds32/include/uapi/asm/sigcontext.h
@@ -1,4 +1,4 @@
1/* SPDX-License-Identifier: GPL-2.0 */ 1/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
2// Copyright (C) 2005-2017 Andes Technology Corporation 2// Copyright (C) 2005-2017 Andes Technology Corporation
3 3
4#ifndef _ASMNDS32_SIGCONTEXT_H 4#ifndef _ASMNDS32_SIGCONTEXT_H
diff --git a/arch/nds32/include/uapi/asm/unistd.h b/arch/nds32/include/uapi/asm/unistd.h
index a0b2f7b9c0f2..410795e280fe 100644
--- a/arch/nds32/include/uapi/asm/unistd.h
+++ b/arch/nds32/include/uapi/asm/unistd.h
@@ -1,4 +1,4 @@
1/* SPDX-License-Identifier: GPL-2.0 */ 1/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
2// Copyright (C) 2005-2017 Andes Technology Corporation 2// Copyright (C) 2005-2017 Andes Technology Corporation
3 3
4#define __ARCH_WANT_STAT64 4#define __ARCH_WANT_STAT64
diff --git a/arch/nds32/kernel/signal.c b/arch/nds32/kernel/signal.c
index fe61513982b4..330b19fcd990 100644
--- a/arch/nds32/kernel/signal.c
+++ b/arch/nds32/kernel/signal.c
@@ -316,6 +316,7 @@ static void handle_signal(struct ksignal *ksig, struct pt_regs *regs)
316 regs->uregs[0] = -EINTR; 316 regs->uregs[0] = -EINTR;
317 break; 317 break;
318 } 318 }
319 /* Else, fall through */
319 case -ERESTARTNOINTR: 320 case -ERESTARTNOINTR:
320 regs->uregs[0] = regs->orig_r0; 321 regs->uregs[0] = regs->orig_r0;
321 regs->ipc -= 4; 322 regs->ipc -= 4;
@@ -360,6 +361,7 @@ static void do_signal(struct pt_regs *regs)
360 switch (regs->uregs[0]) { 361 switch (regs->uregs[0]) {
361 case -ERESTART_RESTARTBLOCK: 362 case -ERESTART_RESTARTBLOCK:
362 regs->uregs[15] = __NR_restart_syscall; 363 regs->uregs[15] = __NR_restart_syscall;
364 /* Fall through */
363 case -ERESTARTNOHAND: 365 case -ERESTARTNOHAND:
364 case -ERESTARTSYS: 366 case -ERESTARTSYS:
365 case -ERESTARTNOINTR: 367 case -ERESTARTNOINTR:
diff --git a/arch/parisc/Makefile b/arch/parisc/Makefile
index 8acb8fa1f8d6..3b77d729057f 100644
--- a/arch/parisc/Makefile
+++ b/arch/parisc/Makefile
@@ -19,8 +19,6 @@
19 19
20KBUILD_IMAGE := vmlinuz 20KBUILD_IMAGE := vmlinuz
21 21
22KBUILD_DEFCONFIG := default_defconfig
23
24NM = sh $(srctree)/arch/parisc/nm 22NM = sh $(srctree)/arch/parisc/nm
25CHECKFLAGS += -D__hppa__=1 23CHECKFLAGS += -D__hppa__=1
26LIBGCC = $(shell $(CC) $(KBUILD_CFLAGS) -print-libgcc-file-name) 24LIBGCC = $(shell $(CC) $(KBUILD_CFLAGS) -print-libgcc-file-name)
@@ -182,5 +180,8 @@ define archhelp
182 @echo ' zinstall - Install compressed vmlinuz kernel' 180 @echo ' zinstall - Install compressed vmlinuz kernel'
183endef 181endef
184 182
183archclean:
184 $(Q)$(MAKE) $(clean)=$(boot)
185
185archheaders: 186archheaders:
186 $(Q)$(MAKE) $(build)=arch/parisc/kernel/syscalls all 187 $(Q)$(MAKE) $(build)=arch/parisc/kernel/syscalls all
diff --git a/arch/parisc/boot/compressed/Makefile b/arch/parisc/boot/compressed/Makefile
index 2da8624e5cf6..1e5879c6a752 100644
--- a/arch/parisc/boot/compressed/Makefile
+++ b/arch/parisc/boot/compressed/Makefile
@@ -12,6 +12,7 @@ UBSAN_SANITIZE := n
12targets := vmlinux.lds vmlinux vmlinux.bin vmlinux.bin.gz vmlinux.bin.bz2 12targets := vmlinux.lds vmlinux vmlinux.bin vmlinux.bin.gz vmlinux.bin.bz2
13targets += vmlinux.bin.xz vmlinux.bin.lzma vmlinux.bin.lzo vmlinux.bin.lz4 13targets += vmlinux.bin.xz vmlinux.bin.lzma vmlinux.bin.lzo vmlinux.bin.lz4
14targets += misc.o piggy.o sizes.h head.o real2.o firmware.o 14targets += misc.o piggy.o sizes.h head.o real2.o firmware.o
15targets += real2.S firmware.c
15 16
16KBUILD_CFLAGS := -D__KERNEL__ -O2 -DBOOTLOADER 17KBUILD_CFLAGS := -D__KERNEL__ -O2 -DBOOTLOADER
17KBUILD_CFLAGS += -DDISABLE_BRANCH_PROFILING 18KBUILD_CFLAGS += -DDISABLE_BRANCH_PROFILING
@@ -55,7 +56,8 @@ $(obj)/misc.o: $(obj)/sizes.h
55CPPFLAGS_vmlinux.lds += -I$(objtree)/$(obj) -DBOOTLOADER 56CPPFLAGS_vmlinux.lds += -I$(objtree)/$(obj) -DBOOTLOADER
56$(obj)/vmlinux.lds: $(obj)/sizes.h 57$(obj)/vmlinux.lds: $(obj)/sizes.h
57 58
58$(obj)/vmlinux.bin: vmlinux 59OBJCOPYFLAGS_vmlinux.bin := -R .comment -R .note -S
60$(obj)/vmlinux.bin: vmlinux FORCE
59 $(call if_changed,objcopy) 61 $(call if_changed,objcopy)
60 62
61vmlinux.bin.all-y := $(obj)/vmlinux.bin 63vmlinux.bin.all-y := $(obj)/vmlinux.bin
diff --git a/arch/parisc/boot/compressed/vmlinux.lds.S b/arch/parisc/boot/compressed/vmlinux.lds.S
index bfd7872739a3..2ac3a643f2eb 100644
--- a/arch/parisc/boot/compressed/vmlinux.lds.S
+++ b/arch/parisc/boot/compressed/vmlinux.lds.S
@@ -48,8 +48,8 @@ SECTIONS
48 *(.rodata.compressed) 48 *(.rodata.compressed)
49 } 49 }
50 50
51 /* bootloader code and data starts behind area of extracted kernel */ 51 /* bootloader code and data starts at least behind area of extracted kernel */
52 . = (SZ_end - SZparisc_kernel_start + KERNEL_BINARY_TEXT_START); 52 . = MAX(ABSOLUTE(.), (SZ_end - SZparisc_kernel_start + KERNEL_BINARY_TEXT_START));
53 53
54 /* align on next page boundary */ 54 /* align on next page boundary */
55 . = ALIGN(4096); 55 . = ALIGN(4096);
diff --git a/arch/parisc/configs/default_defconfig b/arch/parisc/configs/defconfig
index 5b877ca34ebf..5b877ca34ebf 100644
--- a/arch/parisc/configs/default_defconfig
+++ b/arch/parisc/configs/defconfig
diff --git a/arch/parisc/include/asm/kprobes.h b/arch/parisc/include/asm/kprobes.h
index e09cf2deeafe..904034da4974 100644
--- a/arch/parisc/include/asm/kprobes.h
+++ b/arch/parisc/include/asm/kprobes.h
@@ -50,6 +50,10 @@ struct kprobe_ctlblk {
50 50
51int __kprobes parisc_kprobe_break_handler(struct pt_regs *regs); 51int __kprobes parisc_kprobe_break_handler(struct pt_regs *regs);
52int __kprobes parisc_kprobe_ss_handler(struct pt_regs *regs); 52int __kprobes parisc_kprobe_ss_handler(struct pt_regs *regs);
53static inline int kprobe_fault_handler(struct pt_regs *regs, int trapnr)
54{
55 return 0;
56}
53 57
54#endif /* CONFIG_KPROBES */ 58#endif /* CONFIG_KPROBES */
55#endif /* _PARISC_KPROBES_H */ 59#endif /* _PARISC_KPROBES_H */
diff --git a/arch/parisc/include/asm/pgtable.h b/arch/parisc/include/asm/pgtable.h
index a39b079e73f2..6d58c1739b42 100644
--- a/arch/parisc/include/asm/pgtable.h
+++ b/arch/parisc/include/asm/pgtable.h
@@ -2,6 +2,7 @@
2#ifndef _PARISC_PGTABLE_H 2#ifndef _PARISC_PGTABLE_H
3#define _PARISC_PGTABLE_H 3#define _PARISC_PGTABLE_H
4 4
5#include <asm/page.h>
5#include <asm-generic/4level-fixup.h> 6#include <asm-generic/4level-fixup.h>
6 7
7#include <asm/fixmap.h> 8#include <asm/fixmap.h>
@@ -98,8 +99,6 @@ static inline void purge_tlb_entries(struct mm_struct *mm, unsigned long addr)
98 99
99#endif /* !__ASSEMBLY__ */ 100#endif /* !__ASSEMBLY__ */
100 101
101#include <asm/page.h>
102
103#define pte_ERROR(e) \ 102#define pte_ERROR(e) \
104 printk("%s:%d: bad pte %08lx.\n", __FILE__, __LINE__, pte_val(e)) 103 printk("%s:%d: bad pte %08lx.\n", __FILE__, __LINE__, pte_val(e))
105#define pmd_ERROR(e) \ 104#define pmd_ERROR(e) \
diff --git a/arch/parisc/kernel/ftrace.c b/arch/parisc/kernel/ftrace.c
index d784ccdd8fef..b6fb30f2e4bf 100644
--- a/arch/parisc/kernel/ftrace.c
+++ b/arch/parisc/kernel/ftrace.c
@@ -181,8 +181,9 @@ int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec,
181 for (i = 0; i < ARRAY_SIZE(insn); i++) 181 for (i = 0; i < ARRAY_SIZE(insn); i++)
182 insn[i] = INSN_NOP; 182 insn[i] = INSN_NOP;
183 183
184 __patch_text((void *)rec->ip, INSN_NOP);
184 __patch_text_multiple((void *)rec->ip + 4 - sizeof(insn), 185 __patch_text_multiple((void *)rec->ip + 4 - sizeof(insn),
185 insn, sizeof(insn)); 186 insn, sizeof(insn)-4);
186 return 0; 187 return 0;
187} 188}
188#endif 189#endif
diff --git a/arch/parisc/kernel/pacache.S b/arch/parisc/kernel/pacache.S
index ba67893a1d72..df46b0e5a915 100644
--- a/arch/parisc/kernel/pacache.S
+++ b/arch/parisc/kernel/pacache.S
@@ -63,7 +63,7 @@ ENTRY_CFI(flush_tlb_all_local)
63 63
64 /* Flush Instruction Tlb */ 64 /* Flush Instruction Tlb */
65 65
66 LDREG ITLB_SID_BASE(%r1), %r20 6688: LDREG ITLB_SID_BASE(%r1), %r20
67 LDREG ITLB_SID_STRIDE(%r1), %r21 67 LDREG ITLB_SID_STRIDE(%r1), %r21
68 LDREG ITLB_SID_COUNT(%r1), %r22 68 LDREG ITLB_SID_COUNT(%r1), %r22
69 LDREG ITLB_OFF_BASE(%r1), %arg0 69 LDREG ITLB_OFF_BASE(%r1), %arg0
@@ -103,6 +103,7 @@ fitonemiddle: /* Loop if LOOP = 1 */
103 add %r21, %r20, %r20 /* increment space */ 103 add %r21, %r20, %r20 /* increment space */
104 104
105fitdone: 105fitdone:
106 ALTERNATIVE(88b, fitdone, ALT_COND_NO_SPLIT_TLB, INSN_NOP)
106 107
107 /* Flush Data Tlb */ 108 /* Flush Data Tlb */
108 109
diff --git a/arch/parisc/math-emu/Makefile b/arch/parisc/math-emu/Makefile
index b6c4b254901a..3747a0cbd3b8 100644
--- a/arch/parisc/math-emu/Makefile
+++ b/arch/parisc/math-emu/Makefile
@@ -18,3 +18,4 @@ obj-y := frnd.o driver.o decode_exc.o fpudispatch.o denormal.o \
18# other very old or stripped-down PA-RISC CPUs -- not currently supported 18# other very old or stripped-down PA-RISC CPUs -- not currently supported
19 19
20obj-$(CONFIG_MATH_EMULATION) += unimplemented-math-emulation.o 20obj-$(CONFIG_MATH_EMULATION) += unimplemented-math-emulation.o
21CFLAGS_REMOVE_fpudispatch.o = -Wimplicit-fallthrough
diff --git a/arch/parisc/mm/fault.c b/arch/parisc/mm/fault.c
index 6dd4669ce7a5..adbd5e2144a3 100644
--- a/arch/parisc/mm/fault.c
+++ b/arch/parisc/mm/fault.c
@@ -66,6 +66,7 @@ parisc_acctyp(unsigned long code, unsigned int inst)
66 case 0x30000000: /* coproc2 */ 66 case 0x30000000: /* coproc2 */
67 if (bit22set(inst)) 67 if (bit22set(inst))
68 return VM_WRITE; 68 return VM_WRITE;
69 /* fall through */
69 70
70 case 0x0: /* indexed/memory management */ 71 case 0x0: /* indexed/memory management */
71 if (bit22set(inst)) { 72 if (bit22set(inst)) {
diff --git a/arch/powerpc/include/asm/cache.h b/arch/powerpc/include/asm/cache.h
index b3388d95f451..45e3137ccd71 100644
--- a/arch/powerpc/include/asm/cache.h
+++ b/arch/powerpc/include/asm/cache.h
@@ -107,22 +107,22 @@ extern void _set_L3CR(unsigned long);
107 107
108static inline void dcbz(void *addr) 108static inline void dcbz(void *addr)
109{ 109{
110 __asm__ __volatile__ ("dcbz %y0" : : "Z"(*(u8 *)addr) : "memory"); 110 __asm__ __volatile__ ("dcbz 0, %0" : : "r"(addr) : "memory");
111} 111}
112 112
113static inline void dcbi(void *addr) 113static inline void dcbi(void *addr)
114{ 114{
115 __asm__ __volatile__ ("dcbi %y0" : : "Z"(*(u8 *)addr) : "memory"); 115 __asm__ __volatile__ ("dcbi 0, %0" : : "r"(addr) : "memory");
116} 116}
117 117
118static inline void dcbf(void *addr) 118static inline void dcbf(void *addr)
119{ 119{
120 __asm__ __volatile__ ("dcbf %y0" : : "Z"(*(u8 *)addr) : "memory"); 120 __asm__ __volatile__ ("dcbf 0, %0" : : "r"(addr) : "memory");
121} 121}
122 122
123static inline void dcbst(void *addr) 123static inline void dcbst(void *addr)
124{ 124{
125 __asm__ __volatile__ ("dcbst %y0" : : "Z"(*(u8 *)addr) : "memory"); 125 __asm__ __volatile__ ("dcbst 0, %0" : : "r"(addr) : "memory");
126} 126}
127#endif /* !__ASSEMBLY__ */ 127#endif /* !__ASSEMBLY__ */
128#endif /* __KERNEL__ */ 128#endif /* __KERNEL__ */
diff --git a/arch/powerpc/include/asm/hvcall.h b/arch/powerpc/include/asm/hvcall.h
index 463c63a9fcf1..11112023e327 100644
--- a/arch/powerpc/include/asm/hvcall.h
+++ b/arch/powerpc/include/asm/hvcall.h
@@ -302,9 +302,14 @@
302#define H_SCM_UNBIND_MEM 0x3F0 302#define H_SCM_UNBIND_MEM 0x3F0
303#define H_SCM_QUERY_BLOCK_MEM_BINDING 0x3F4 303#define H_SCM_QUERY_BLOCK_MEM_BINDING 0x3F4
304#define H_SCM_QUERY_LOGICAL_MEM_BINDING 0x3F8 304#define H_SCM_QUERY_LOGICAL_MEM_BINDING 0x3F8
305#define H_SCM_MEM_QUERY 0x3FC 305#define H_SCM_UNBIND_ALL 0x3FC
306#define H_SCM_BLOCK_CLEAR 0x400 306#define H_SCM_HEALTH 0x400
307#define MAX_HCALL_OPCODE H_SCM_BLOCK_CLEAR 307#define H_SCM_PERFORMANCE_STATS 0x418
308#define MAX_HCALL_OPCODE H_SCM_PERFORMANCE_STATS
309
310/* Scope args for H_SCM_UNBIND_ALL */
311#define H_UNBIND_SCOPE_ALL (0x1)
312#define H_UNBIND_SCOPE_DRC (0x2)
308 313
309/* H_VIOCTL functions */ 314/* H_VIOCTL functions */
310#define H_GET_VIOA_DUMP_SIZE 0x01 315#define H_GET_VIOA_DUMP_SIZE 0x01
diff --git a/arch/powerpc/include/asm/pmc.h b/arch/powerpc/include/asm/pmc.h
index dc9a1ca70edf..c6bbe9778d3c 100644
--- a/arch/powerpc/include/asm/pmc.h
+++ b/arch/powerpc/include/asm/pmc.h
@@ -27,11 +27,10 @@ static inline void ppc_set_pmu_inuse(int inuse)
27#ifdef CONFIG_PPC_PSERIES 27#ifdef CONFIG_PPC_PSERIES
28 get_lppaca()->pmcregs_in_use = inuse; 28 get_lppaca()->pmcregs_in_use = inuse;
29#endif 29#endif
30 } else { 30 }
31#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE 31#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
32 get_paca()->pmcregs_in_use = inuse; 32 get_paca()->pmcregs_in_use = inuse;
33#endif 33#endif
34 }
35#endif 34#endif
36} 35}
37 36
diff --git a/arch/powerpc/include/asm/unistd.h b/arch/powerpc/include/asm/unistd.h
index 68473c3c471c..b0720c7c3fcf 100644
--- a/arch/powerpc/include/asm/unistd.h
+++ b/arch/powerpc/include/asm/unistd.h
@@ -49,6 +49,7 @@
49#define __ARCH_WANT_SYS_FORK 49#define __ARCH_WANT_SYS_FORK
50#define __ARCH_WANT_SYS_VFORK 50#define __ARCH_WANT_SYS_VFORK
51#define __ARCH_WANT_SYS_CLONE 51#define __ARCH_WANT_SYS_CLONE
52#define __ARCH_WANT_SYS_CLONE3
52 53
53#endif /* __ASSEMBLY__ */ 54#endif /* __ASSEMBLY__ */
54#endif /* _ASM_POWERPC_UNISTD_H_ */ 55#endif /* _ASM_POWERPC_UNISTD_H_ */
diff --git a/arch/powerpc/include/uapi/asm/bpf_perf_event.h b/arch/powerpc/include/uapi/asm/bpf_perf_event.h
index b551b741653d..5e1e648aeec4 100644
--- a/arch/powerpc/include/uapi/asm/bpf_perf_event.h
+++ b/arch/powerpc/include/uapi/asm/bpf_perf_event.h
@@ -1,4 +1,4 @@
1/* SPDX-License-Identifier: GPL-2.0 */ 1/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
2#ifndef _UAPI__ASM_BPF_PERF_EVENT_H__ 2#ifndef _UAPI__ASM_BPF_PERF_EVENT_H__
3#define _UAPI__ASM_BPF_PERF_EVENT_H__ 3#define _UAPI__ASM_BPF_PERF_EVENT_H__
4 4
diff --git a/arch/powerpc/include/uapi/asm/kvm_para.h b/arch/powerpc/include/uapi/asm/kvm_para.h
index 01555c6ae0f5..be48c2215fa2 100644
--- a/arch/powerpc/include/uapi/asm/kvm_para.h
+++ b/arch/powerpc/include/uapi/asm/kvm_para.h
@@ -31,7 +31,7 @@
31 * Struct fields are always 32 or 64 bit aligned, depending on them being 32 31 * Struct fields are always 32 or 64 bit aligned, depending on them being 32
32 * or 64 bit wide respectively. 32 * or 64 bit wide respectively.
33 * 33 *
34 * See Documentation/virtual/kvm/ppc-pv.txt 34 * See Documentation/virt/kvm/ppc-pv.txt
35 */ 35 */
36struct kvm_vcpu_arch_shared { 36struct kvm_vcpu_arch_shared {
37 __u64 scratch1; 37 __u64 scratch1;
diff --git a/arch/powerpc/kernel/align.c b/arch/powerpc/kernel/align.c
index 7107ad86de65..92045ed64976 100644
--- a/arch/powerpc/kernel/align.c
+++ b/arch/powerpc/kernel/align.c
@@ -176,9 +176,11 @@ static int emulate_spe(struct pt_regs *regs, unsigned int reg,
176 ret |= __get_user_inatomic(temp.v[1], p++); 176 ret |= __get_user_inatomic(temp.v[1], p++);
177 ret |= __get_user_inatomic(temp.v[2], p++); 177 ret |= __get_user_inatomic(temp.v[2], p++);
178 ret |= __get_user_inatomic(temp.v[3], p++); 178 ret |= __get_user_inatomic(temp.v[3], p++);
179 /* fall through */
179 case 4: 180 case 4:
180 ret |= __get_user_inatomic(temp.v[4], p++); 181 ret |= __get_user_inatomic(temp.v[4], p++);
181 ret |= __get_user_inatomic(temp.v[5], p++); 182 ret |= __get_user_inatomic(temp.v[5], p++);
183 /* fall through */
182 case 2: 184 case 2:
183 ret |= __get_user_inatomic(temp.v[6], p++); 185 ret |= __get_user_inatomic(temp.v[6], p++);
184 ret |= __get_user_inatomic(temp.v[7], p++); 186 ret |= __get_user_inatomic(temp.v[7], p++);
@@ -259,9 +261,11 @@ static int emulate_spe(struct pt_regs *regs, unsigned int reg,
259 ret |= __put_user_inatomic(data.v[1], p++); 261 ret |= __put_user_inatomic(data.v[1], p++);
260 ret |= __put_user_inatomic(data.v[2], p++); 262 ret |= __put_user_inatomic(data.v[2], p++);
261 ret |= __put_user_inatomic(data.v[3], p++); 263 ret |= __put_user_inatomic(data.v[3], p++);
264 /* fall through */
262 case 4: 265 case 4:
263 ret |= __put_user_inatomic(data.v[4], p++); 266 ret |= __put_user_inatomic(data.v[4], p++);
264 ret |= __put_user_inatomic(data.v[5], p++); 267 ret |= __put_user_inatomic(data.v[5], p++);
268 /* fall through */
265 case 2: 269 case 2:
266 ret |= __put_user_inatomic(data.v[6], p++); 270 ret |= __put_user_inatomic(data.v[6], p++);
267 ret |= __put_user_inatomic(data.v[7], p++); 271 ret |= __put_user_inatomic(data.v[7], p++);
diff --git a/arch/powerpc/kernel/entry_32.S b/arch/powerpc/kernel/entry_32.S
index 85fdb6d879f1..54fab22c9a43 100644
--- a/arch/powerpc/kernel/entry_32.S
+++ b/arch/powerpc/kernel/entry_32.S
@@ -597,6 +597,14 @@ ppc_clone:
597 stw r0,_TRAP(r1) /* register set saved */ 597 stw r0,_TRAP(r1) /* register set saved */
598 b sys_clone 598 b sys_clone
599 599
600 .globl ppc_clone3
601ppc_clone3:
602 SAVE_NVGPRS(r1)
603 lwz r0,_TRAP(r1)
604 rlwinm r0,r0,0,0,30 /* clear LSB to indicate full */
605 stw r0,_TRAP(r1) /* register set saved */
606 b sys_clone3
607
600 .globl ppc_swapcontext 608 .globl ppc_swapcontext
601ppc_swapcontext: 609ppc_swapcontext:
602 SAVE_NVGPRS(r1) 610 SAVE_NVGPRS(r1)
diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S
index d9105fcf4021..0a0b5310f54a 100644
--- a/arch/powerpc/kernel/entry_64.S
+++ b/arch/powerpc/kernel/entry_64.S
@@ -487,6 +487,11 @@ _GLOBAL(ppc_clone)
487 bl sys_clone 487 bl sys_clone
488 b .Lsyscall_exit 488 b .Lsyscall_exit
489 489
490_GLOBAL(ppc_clone3)
491 bl save_nvgprs
492 bl sys_clone3
493 b .Lsyscall_exit
494
490_GLOBAL(ppc32_swapcontext) 495_GLOBAL(ppc32_swapcontext)
491 bl save_nvgprs 496 bl save_nvgprs
492 bl compat_sys_swapcontext 497 bl compat_sys_swapcontext
diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
index eee5bef736c8..6ba3cc2ef8ab 100644
--- a/arch/powerpc/kernel/exceptions-64s.S
+++ b/arch/powerpc/kernel/exceptions-64s.S
@@ -1531,7 +1531,7 @@ EXC_COMMON(trap_0b_common, 0xb00, unknown_exception)
1531 * 1531 *
1532 * Call convention: 1532 * Call convention:
1533 * 1533 *
1534 * syscall register convention is in Documentation/powerpc/syscall64-abi.txt 1534 * syscall register convention is in Documentation/powerpc/syscall64-abi.rst
1535 * 1535 *
1536 * For hypercalls, the register convention is as follows: 1536 * For hypercalls, the register convention is as follows:
1537 * r0 volatile 1537 * r0 volatile
diff --git a/arch/powerpc/kernel/signal_32.c b/arch/powerpc/kernel/signal_32.c
index f50b708d6d77..98600b276f76 100644
--- a/arch/powerpc/kernel/signal_32.c
+++ b/arch/powerpc/kernel/signal_32.c
@@ -1198,6 +1198,9 @@ SYSCALL_DEFINE0(rt_sigreturn)
1198 goto bad; 1198 goto bad;
1199 1199
1200 if (MSR_TM_ACTIVE(msr_hi<<32)) { 1200 if (MSR_TM_ACTIVE(msr_hi<<32)) {
1201 /* Trying to start TM on non TM system */
1202 if (!cpu_has_feature(CPU_FTR_TM))
1203 goto bad;
1201 /* We only recheckpoint on return if we're 1204 /* We only recheckpoint on return if we're
1202 * transaction. 1205 * transaction.
1203 */ 1206 */
diff --git a/arch/powerpc/kernel/signal_64.c b/arch/powerpc/kernel/signal_64.c
index 2f80e270c7b0..117515564ec7 100644
--- a/arch/powerpc/kernel/signal_64.c
+++ b/arch/powerpc/kernel/signal_64.c
@@ -771,6 +771,11 @@ SYSCALL_DEFINE0(rt_sigreturn)
771 if (MSR_TM_ACTIVE(msr)) { 771 if (MSR_TM_ACTIVE(msr)) {
772 /* We recheckpoint on return. */ 772 /* We recheckpoint on return. */
773 struct ucontext __user *uc_transact; 773 struct ucontext __user *uc_transact;
774
775 /* Trying to start TM on non TM system */
776 if (!cpu_has_feature(CPU_FTR_TM))
777 goto badframe;
778
774 if (__get_user(uc_transact, &uc->uc_link)) 779 if (__get_user(uc_transact, &uc->uc_link))
775 goto badframe; 780 goto badframe;
776 if (restore_tm_sigcontexts(current, &uc->uc_mcontext, 781 if (restore_tm_sigcontexts(current, &uc->uc_mcontext,
diff --git a/arch/powerpc/kernel/syscalls/syscall.tbl b/arch/powerpc/kernel/syscalls/syscall.tbl
index 3331749aab20..43f736ed47f2 100644
--- a/arch/powerpc/kernel/syscalls/syscall.tbl
+++ b/arch/powerpc/kernel/syscalls/syscall.tbl
@@ -516,4 +516,4 @@
516432 common fsmount sys_fsmount 516432 common fsmount sys_fsmount
517433 common fspick sys_fspick 517433 common fspick sys_fspick
518434 common pidfd_open sys_pidfd_open 518434 common pidfd_open sys_pidfd_open
519# 435 reserved for clone3 519435 nospu clone3 ppc_clone3
diff --git a/arch/powerpc/kvm/book3s_32_mmu.c b/arch/powerpc/kvm/book3s_32_mmu.c
index 653936177857..18f244aad7aa 100644
--- a/arch/powerpc/kvm/book3s_32_mmu.c
+++ b/arch/powerpc/kvm/book3s_32_mmu.c
@@ -239,6 +239,7 @@ static int kvmppc_mmu_book3s_32_xlate_pte(struct kvm_vcpu *vcpu, gva_t eaddr,
239 case 2: 239 case 2:
240 case 6: 240 case 6:
241 pte->may_write = true; 241 pte->may_write = true;
242 /* fall through */
242 case 3: 243 case 3:
243 case 5: 244 case 5:
244 case 7: 245 case 7:
diff --git a/arch/powerpc/kvm/book3s_64_vio.c b/arch/powerpc/kvm/book3s_64_vio.c
index e99a14798ab0..c4b606fe73eb 100644
--- a/arch/powerpc/kvm/book3s_64_vio.c
+++ b/arch/powerpc/kvm/book3s_64_vio.c
@@ -660,8 +660,10 @@ long kvmppc_h_put_tce_indirect(struct kvm_vcpu *vcpu,
660 } 660 }
661 tce = be64_to_cpu(tce); 661 tce = be64_to_cpu(tce);
662 662
663 if (kvmppc_tce_to_ua(vcpu->kvm, tce, &ua)) 663 if (kvmppc_tce_to_ua(vcpu->kvm, tce, &ua)) {
664 return H_PARAMETER; 664 ret = H_PARAMETER;
665 goto unlock_exit;
666 }
665 667
666 list_for_each_entry_lockless(stit, &stt->iommu_tables, next) { 668 list_for_each_entry_lockless(stit, &stt->iommu_tables, next) {
667 ret = kvmppc_tce_iommu_map(vcpu->kvm, stt, 669 ret = kvmppc_tce_iommu_map(vcpu->kvm, stt,
diff --git a/arch/powerpc/kvm/book3s_64_vio_hv.c b/arch/powerpc/kvm/book3s_64_vio_hv.c
index f50bbeedfc66..b4f20f13b860 100644
--- a/arch/powerpc/kvm/book3s_64_vio_hv.c
+++ b/arch/powerpc/kvm/book3s_64_vio_hv.c
@@ -556,8 +556,10 @@ long kvmppc_rm_h_put_tce_indirect(struct kvm_vcpu *vcpu,
556 unsigned long tce = be64_to_cpu(((u64 *)tces)[i]); 556 unsigned long tce = be64_to_cpu(((u64 *)tces)[i]);
557 557
558 ua = 0; 558 ua = 0;
559 if (kvmppc_rm_tce_to_ua(vcpu->kvm, tce, &ua, NULL)) 559 if (kvmppc_rm_tce_to_ua(vcpu->kvm, tce, &ua, NULL)) {
560 return H_PARAMETER; 560 ret = H_PARAMETER;
561 goto unlock_exit;
562 }
561 563
562 list_for_each_entry_lockless(stit, &stt->iommu_tables, next) { 564 list_for_each_entry_lockless(stit, &stt->iommu_tables, next) {
563 ret = kvmppc_rm_tce_iommu_map(vcpu->kvm, stt, 565 ret = kvmppc_rm_tce_iommu_map(vcpu->kvm, stt,
diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
index ec1804f822af..cde3f5a4b3e4 100644
--- a/arch/powerpc/kvm/book3s_hv.c
+++ b/arch/powerpc/kvm/book3s_hv.c
@@ -3569,9 +3569,18 @@ int kvmhv_p9_guest_entry(struct kvm_vcpu *vcpu, u64 time_limit,
3569 mtspr(SPRN_DEC, vcpu->arch.dec_expires - mftb()); 3569 mtspr(SPRN_DEC, vcpu->arch.dec_expires - mftb());
3570 3570
3571 if (kvmhv_on_pseries()) { 3571 if (kvmhv_on_pseries()) {
3572 /*
3573 * We need to save and restore the guest visible part of the
3574 * psscr (i.e. using SPRN_PSSCR_PR) since the hypervisor
3575 * doesn't do this for us. Note only required if pseries since
3576 * this is done in kvmhv_load_hv_regs_and_go() below otherwise.
3577 */
3578 unsigned long host_psscr;
3572 /* call our hypervisor to load up HV regs and go */ 3579 /* call our hypervisor to load up HV regs and go */
3573 struct hv_guest_state hvregs; 3580 struct hv_guest_state hvregs;
3574 3581
3582 host_psscr = mfspr(SPRN_PSSCR_PR);
3583 mtspr(SPRN_PSSCR_PR, vcpu->arch.psscr);
3575 kvmhv_save_hv_regs(vcpu, &hvregs); 3584 kvmhv_save_hv_regs(vcpu, &hvregs);
3576 hvregs.lpcr = lpcr; 3585 hvregs.lpcr = lpcr;
3577 vcpu->arch.regs.msr = vcpu->arch.shregs.msr; 3586 vcpu->arch.regs.msr = vcpu->arch.shregs.msr;
@@ -3590,6 +3599,8 @@ int kvmhv_p9_guest_entry(struct kvm_vcpu *vcpu, u64 time_limit,
3590 vcpu->arch.shregs.msr = vcpu->arch.regs.msr; 3599 vcpu->arch.shregs.msr = vcpu->arch.regs.msr;
3591 vcpu->arch.shregs.dar = mfspr(SPRN_DAR); 3600 vcpu->arch.shregs.dar = mfspr(SPRN_DAR);
3592 vcpu->arch.shregs.dsisr = mfspr(SPRN_DSISR); 3601 vcpu->arch.shregs.dsisr = mfspr(SPRN_DSISR);
3602 vcpu->arch.psscr = mfspr(SPRN_PSSCR_PR);
3603 mtspr(SPRN_PSSCR_PR, host_psscr);
3593 3604
3594 /* H_CEDE has to be handled now, not later */ 3605 /* H_CEDE has to be handled now, not later */
3595 if (trap == BOOK3S_INTERRUPT_SYSCALL && !vcpu->arch.nested && 3606 if (trap == BOOK3S_INTERRUPT_SYSCALL && !vcpu->arch.nested &&
@@ -3654,6 +3665,8 @@ int kvmhv_p9_guest_entry(struct kvm_vcpu *vcpu, u64 time_limit,
3654 vcpu->arch.vpa.dirty = 1; 3665 vcpu->arch.vpa.dirty = 1;
3655 save_pmu = lp->pmcregs_in_use; 3666 save_pmu = lp->pmcregs_in_use;
3656 } 3667 }
3668 /* Must save pmu if this guest is capable of running nested guests */
3669 save_pmu |= nesting_enabled(vcpu->kvm);
3657 3670
3658 kvmhv_save_guest_pmu(vcpu, save_pmu); 3671 kvmhv_save_guest_pmu(vcpu, save_pmu);
3659 3672
diff --git a/arch/powerpc/kvm/book3s_xive.c b/arch/powerpc/kvm/book3s_xive.c
index 6ca0d7376a9f..e3ba67095895 100644
--- a/arch/powerpc/kvm/book3s_xive.c
+++ b/arch/powerpc/kvm/book3s_xive.c
@@ -1986,10 +1986,8 @@ static int kvmppc_xive_create(struct kvm_device *dev, u32 type)
1986 1986
1987 xive->single_escalation = xive_native_has_single_escalation(); 1987 xive->single_escalation = xive_native_has_single_escalation();
1988 1988
1989 if (ret) { 1989 if (ret)
1990 kfree(xive);
1991 return ret; 1990 return ret;
1992 }
1993 1991
1994 return 0; 1992 return 0;
1995} 1993}
diff --git a/arch/powerpc/kvm/book3s_xive_native.c b/arch/powerpc/kvm/book3s_xive_native.c
index 5596c8ec221a..a998823f68a3 100644
--- a/arch/powerpc/kvm/book3s_xive_native.c
+++ b/arch/powerpc/kvm/book3s_xive_native.c
@@ -1090,9 +1090,9 @@ static int kvmppc_xive_native_create(struct kvm_device *dev, u32 type)
1090 xive->ops = &kvmppc_xive_native_ops; 1090 xive->ops = &kvmppc_xive_native_ops;
1091 1091
1092 if (ret) 1092 if (ret)
1093 kfree(xive); 1093 return ret;
1094 1094
1095 return ret; 1095 return 0;
1096} 1096}
1097 1097
1098/* 1098/*
diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c
index 0dba7eb24f92..3e566c2e6066 100644
--- a/arch/powerpc/kvm/powerpc.c
+++ b/arch/powerpc/kvm/powerpc.c
@@ -50,6 +50,11 @@ int kvm_arch_vcpu_runnable(struct kvm_vcpu *v)
50 return !!(v->arch.pending_exceptions) || kvm_request_pending(v); 50 return !!(v->arch.pending_exceptions) || kvm_request_pending(v);
51} 51}
52 52
53bool kvm_arch_dy_runnable(struct kvm_vcpu *vcpu)
54{
55 return kvm_arch_vcpu_runnable(vcpu);
56}
57
53bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu) 58bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu)
54{ 59{
55 return false; 60 return false;
@@ -452,16 +457,6 @@ err_out:
452 return -EINVAL; 457 return -EINVAL;
453} 458}
454 459
455bool kvm_arch_has_vcpu_debugfs(void)
456{
457 return false;
458}
459
460int kvm_arch_create_vcpu_debugfs(struct kvm_vcpu *vcpu)
461{
462 return 0;
463}
464
465void kvm_arch_destroy_vm(struct kvm *kvm) 460void kvm_arch_destroy_vm(struct kvm *kvm)
466{ 461{
467 unsigned int i; 462 unsigned int i;
diff --git a/arch/powerpc/mm/book3s64/hash_utils.c b/arch/powerpc/mm/book3s64/hash_utils.c
index 9a5963e07a82..b8ad14bb1170 100644
--- a/arch/powerpc/mm/book3s64/hash_utils.c
+++ b/arch/powerpc/mm/book3s64/hash_utils.c
@@ -1899,11 +1899,20 @@ void hash__setup_initial_memory_limit(phys_addr_t first_memblock_base,
1899 * 1899 *
1900 * For guests on platforms before POWER9, we clamp the it limit to 1G 1900 * For guests on platforms before POWER9, we clamp the it limit to 1G
1901 * to avoid some funky things such as RTAS bugs etc... 1901 * to avoid some funky things such as RTAS bugs etc...
1902 *
1903 * On POWER9 we limit to 1TB in case the host erroneously told us that
1904 * the RMA was >1TB. Effective address bits 0:23 are treated as zero
1905 * (meaning the access is aliased to zero i.e. addr = addr % 1TB)
1906 * for virtual real mode addressing and so it doesn't make sense to
1907 * have an area larger than 1TB as it can't be addressed.
1902 */ 1908 */
1903 if (!early_cpu_has_feature(CPU_FTR_HVMODE)) { 1909 if (!early_cpu_has_feature(CPU_FTR_HVMODE)) {
1904 ppc64_rma_size = first_memblock_size; 1910 ppc64_rma_size = first_memblock_size;
1905 if (!early_cpu_has_feature(CPU_FTR_ARCH_300)) 1911 if (!early_cpu_has_feature(CPU_FTR_ARCH_300))
1906 ppc64_rma_size = min_t(u64, ppc64_rma_size, 0x40000000); 1912 ppc64_rma_size = min_t(u64, ppc64_rma_size, 0x40000000);
1913 else
1914 ppc64_rma_size = min_t(u64, ppc64_rma_size,
1915 1UL << SID_SHIFT_1T);
1907 1916
1908 /* Finally limit subsequent allocations */ 1917 /* Finally limit subsequent allocations */
1909 memblock_set_current_limit(ppc64_rma_size); 1918 memblock_set_current_limit(ppc64_rma_size);
diff --git a/arch/powerpc/mm/kasan/kasan_init_32.c b/arch/powerpc/mm/kasan/kasan_init_32.c
index 0d62be3cba47..74f4555a62ba 100644
--- a/arch/powerpc/mm/kasan/kasan_init_32.c
+++ b/arch/powerpc/mm/kasan/kasan_init_32.c
@@ -21,7 +21,7 @@ static void kasan_populate_pte(pte_t *ptep, pgprot_t prot)
21 __set_pte_at(&init_mm, va, ptep, pfn_pte(PHYS_PFN(pa), prot), 0); 21 __set_pte_at(&init_mm, va, ptep, pfn_pte(PHYS_PFN(pa), prot), 0);
22} 22}
23 23
24static int kasan_init_shadow_page_tables(unsigned long k_start, unsigned long k_end) 24static int __ref kasan_init_shadow_page_tables(unsigned long k_start, unsigned long k_end)
25{ 25{
26 pmd_t *pmd; 26 pmd_t *pmd;
27 unsigned long k_cur, k_next; 27 unsigned long k_cur, k_next;
@@ -35,7 +35,10 @@ static int kasan_init_shadow_page_tables(unsigned long k_start, unsigned long k_
35 if ((void *)pmd_page_vaddr(*pmd) != kasan_early_shadow_pte) 35 if ((void *)pmd_page_vaddr(*pmd) != kasan_early_shadow_pte)
36 continue; 36 continue;
37 37
38 new = pte_alloc_one_kernel(&init_mm); 38 if (slab_is_available())
39 new = pte_alloc_one_kernel(&init_mm);
40 else
41 new = memblock_alloc(PTE_FRAG_SIZE, PTE_FRAG_SIZE);
39 42
40 if (!new) 43 if (!new)
41 return -ENOMEM; 44 return -ENOMEM;
diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c
index 9259337d7374..9191a66b3bc5 100644
--- a/arch/powerpc/mm/mem.c
+++ b/arch/powerpc/mm/mem.c
@@ -239,7 +239,7 @@ void __init paging_init(void)
239 239
240#ifdef CONFIG_ZONE_DMA 240#ifdef CONFIG_ZONE_DMA
241 max_zone_pfns[ZONE_DMA] = min(max_low_pfn, 241 max_zone_pfns[ZONE_DMA] = min(max_low_pfn,
242 ((1UL << ARCH_ZONE_DMA_BITS) - 1) >> PAGE_SHIFT); 242 1UL << (ARCH_ZONE_DMA_BITS - PAGE_SHIFT));
243#endif 243#endif
244 max_zone_pfns[ZONE_NORMAL] = max_low_pfn; 244 max_zone_pfns[ZONE_NORMAL] = max_low_pfn;
245#ifdef CONFIG_HIGHMEM 245#ifdef CONFIG_HIGHMEM
diff --git a/arch/powerpc/platforms/pseries/papr_scm.c b/arch/powerpc/platforms/pseries/papr_scm.c
index c8ec670ee924..a5ac371a3f06 100644
--- a/arch/powerpc/platforms/pseries/papr_scm.c
+++ b/arch/powerpc/platforms/pseries/papr_scm.c
@@ -11,6 +11,7 @@
11#include <linux/sched.h> 11#include <linux/sched.h>
12#include <linux/libnvdimm.h> 12#include <linux/libnvdimm.h>
13#include <linux/platform_device.h> 13#include <linux/platform_device.h>
14#include <linux/delay.h>
14 15
15#include <asm/plpar_wrappers.h> 16#include <asm/plpar_wrappers.h>
16 17
@@ -43,8 +44,9 @@ struct papr_scm_priv {
43static int drc_pmem_bind(struct papr_scm_priv *p) 44static int drc_pmem_bind(struct papr_scm_priv *p)
44{ 45{
45 unsigned long ret[PLPAR_HCALL_BUFSIZE]; 46 unsigned long ret[PLPAR_HCALL_BUFSIZE];
46 uint64_t rc, token;
47 uint64_t saved = 0; 47 uint64_t saved = 0;
48 uint64_t token;
49 int64_t rc;
48 50
49 /* 51 /*
50 * When the hypervisor cannot map all the requested memory in a single 52 * When the hypervisor cannot map all the requested memory in a single
@@ -64,6 +66,10 @@ static int drc_pmem_bind(struct papr_scm_priv *p)
64 } while (rc == H_BUSY); 66 } while (rc == H_BUSY);
65 67
66 if (rc) { 68 if (rc) {
69 /* H_OVERLAP needs a separate error path */
70 if (rc == H_OVERLAP)
71 return -EBUSY;
72
67 dev_err(&p->pdev->dev, "bind err: %lld\n", rc); 73 dev_err(&p->pdev->dev, "bind err: %lld\n", rc);
68 return -ENXIO; 74 return -ENXIO;
69 } 75 }
@@ -78,22 +84,36 @@ static int drc_pmem_bind(struct papr_scm_priv *p)
78static int drc_pmem_unbind(struct papr_scm_priv *p) 84static int drc_pmem_unbind(struct papr_scm_priv *p)
79{ 85{
80 unsigned long ret[PLPAR_HCALL_BUFSIZE]; 86 unsigned long ret[PLPAR_HCALL_BUFSIZE];
81 uint64_t rc, token; 87 uint64_t token = 0;
88 int64_t rc;
82 89
83 token = 0; 90 dev_dbg(&p->pdev->dev, "unbind drc %x\n", p->drc_index);
84 91
85 /* NB: unbind has the same retry requirements mentioned above */ 92 /* NB: unbind has the same retry requirements as drc_pmem_bind() */
86 do { 93 do {
87 rc = plpar_hcall(H_SCM_UNBIND_MEM, ret, p->drc_index, 94
88 p->bound_addr, p->blocks, token); 95 /* Unbind of all SCM resources associated with drcIndex */
96 rc = plpar_hcall(H_SCM_UNBIND_ALL, ret, H_UNBIND_SCOPE_DRC,
97 p->drc_index, token);
89 token = ret[0]; 98 token = ret[0];
90 cond_resched(); 99
100 /* Check if we are stalled for some time */
101 if (H_IS_LONG_BUSY(rc)) {
102 msleep(get_longbusy_msecs(rc));
103 rc = H_BUSY;
104 } else if (rc == H_BUSY) {
105 cond_resched();
106 }
107
91 } while (rc == H_BUSY); 108 } while (rc == H_BUSY);
92 109
93 if (rc) 110 if (rc)
94 dev_err(&p->pdev->dev, "unbind error: %lld\n", rc); 111 dev_err(&p->pdev->dev, "unbind error: %lld\n", rc);
112 else
113 dev_dbg(&p->pdev->dev, "unbind drc %x complete\n",
114 p->drc_index);
95 115
96 return !!rc; 116 return rc == H_SUCCESS ? 0 : -ENXIO;
97} 117}
98 118
99static int papr_scm_meta_get(struct papr_scm_priv *p, 119static int papr_scm_meta_get(struct papr_scm_priv *p,
@@ -255,12 +275,32 @@ static const struct attribute_group *papr_scm_dimm_groups[] = {
255 NULL, 275 NULL,
256}; 276};
257 277
278static inline int papr_scm_node(int node)
279{
280 int min_dist = INT_MAX, dist;
281 int nid, min_node;
282
283 if ((node == NUMA_NO_NODE) || node_online(node))
284 return node;
285
286 min_node = first_online_node;
287 for_each_online_node(nid) {
288 dist = node_distance(node, nid);
289 if (dist < min_dist) {
290 min_dist = dist;
291 min_node = nid;
292 }
293 }
294 return min_node;
295}
296
258static int papr_scm_nvdimm_init(struct papr_scm_priv *p) 297static int papr_scm_nvdimm_init(struct papr_scm_priv *p)
259{ 298{
260 struct device *dev = &p->pdev->dev; 299 struct device *dev = &p->pdev->dev;
261 struct nd_mapping_desc mapping; 300 struct nd_mapping_desc mapping;
262 struct nd_region_desc ndr_desc; 301 struct nd_region_desc ndr_desc;
263 unsigned long dimm_flags; 302 unsigned long dimm_flags;
303 int target_nid, online_nid;
264 304
265 p->bus_desc.ndctl = papr_scm_ndctl; 305 p->bus_desc.ndctl = papr_scm_ndctl;
266 p->bus_desc.module = THIS_MODULE; 306 p->bus_desc.module = THIS_MODULE;
@@ -299,8 +339,10 @@ static int papr_scm_nvdimm_init(struct papr_scm_priv *p)
299 339
300 memset(&ndr_desc, 0, sizeof(ndr_desc)); 340 memset(&ndr_desc, 0, sizeof(ndr_desc));
301 ndr_desc.attr_groups = region_attr_groups; 341 ndr_desc.attr_groups = region_attr_groups;
302 ndr_desc.numa_node = dev_to_node(&p->pdev->dev); 342 target_nid = dev_to_node(&p->pdev->dev);
303 ndr_desc.target_node = ndr_desc.numa_node; 343 online_nid = papr_scm_node(target_nid);
344 ndr_desc.numa_node = online_nid;
345 ndr_desc.target_node = target_nid;
304 ndr_desc.res = &p->res; 346 ndr_desc.res = &p->res;
305 ndr_desc.of_node = p->dn; 347 ndr_desc.of_node = p->dn;
306 ndr_desc.provider_data = p; 348 ndr_desc.provider_data = p;
@@ -318,6 +360,9 @@ static int papr_scm_nvdimm_init(struct papr_scm_priv *p)
318 ndr_desc.res, p->dn); 360 ndr_desc.res, p->dn);
319 goto err; 361 goto err;
320 } 362 }
363 if (target_nid != online_nid)
364 dev_info(dev, "Region registered with target node %d and online node %d",
365 target_nid, online_nid);
321 366
322 return 0; 367 return 0;
323 368
@@ -389,6 +434,14 @@ static int papr_scm_probe(struct platform_device *pdev)
389 434
390 /* request the hypervisor to bind this region to somewhere in memory */ 435 /* request the hypervisor to bind this region to somewhere in memory */
391 rc = drc_pmem_bind(p); 436 rc = drc_pmem_bind(p);
437
438 /* If phyp says drc memory still bound then force unbound and retry */
439 if (rc == -EBUSY) {
440 dev_warn(&pdev->dev, "Retrying bind after unbinding\n");
441 drc_pmem_unbind(p);
442 rc = drc_pmem_bind(p);
443 }
444
392 if (rc) 445 if (rc)
393 goto err; 446 goto err;
394 447
diff --git a/arch/powerpc/sysdev/xive/common.c b/arch/powerpc/sysdev/xive/common.c
index 082c7e1c20f0..1cdb39575eae 100644
--- a/arch/powerpc/sysdev/xive/common.c
+++ b/arch/powerpc/sysdev/xive/common.c
@@ -479,7 +479,7 @@ static int xive_find_target_in_mask(const struct cpumask *mask,
479 * Now go through the entire mask until we find a valid 479 * Now go through the entire mask until we find a valid
480 * target. 480 * target.
481 */ 481 */
482 for (;;) { 482 do {
483 /* 483 /*
484 * We re-check online as the fallback case passes us 484 * We re-check online as the fallback case passes us
485 * an untested affinity mask 485 * an untested affinity mask
@@ -487,12 +487,11 @@ static int xive_find_target_in_mask(const struct cpumask *mask,
487 if (cpu_online(cpu) && xive_try_pick_target(cpu)) 487 if (cpu_online(cpu) && xive_try_pick_target(cpu))
488 return cpu; 488 return cpu;
489 cpu = cpumask_next(cpu, mask); 489 cpu = cpumask_next(cpu, mask);
490 if (cpu == first)
491 break;
492 /* Wrap around */ 490 /* Wrap around */
493 if (cpu >= nr_cpu_ids) 491 if (cpu >= nr_cpu_ids)
494 cpu = cpumask_first(mask); 492 cpu = cpumask_first(mask);
495 } 493 } while (cpu != first);
494
496 return -1; 495 return -1;
497} 496}
498 497
diff --git a/arch/riscv/boot/dts/sifive/fu540-c000.dtsi b/arch/riscv/boot/dts/sifive/fu540-c000.dtsi
index 40983491b95f..42b5ec223100 100644
--- a/arch/riscv/boot/dts/sifive/fu540-c000.dtsi
+++ b/arch/riscv/boot/dts/sifive/fu540-c000.dtsi
@@ -21,7 +21,6 @@
21 cpus { 21 cpus {
22 #address-cells = <1>; 22 #address-cells = <1>;
23 #size-cells = <0>; 23 #size-cells = <0>;
24 timebase-frequency = <1000000>;
25 cpu0: cpu@0 { 24 cpu0: cpu@0 {
26 compatible = "sifive,e51", "sifive,rocket0", "riscv"; 25 compatible = "sifive,e51", "sifive,rocket0", "riscv";
27 device_type = "cpu"; 26 device_type = "cpu";
@@ -217,5 +216,20 @@
217 #size-cells = <0>; 216 #size-cells = <0>;
218 status = "disabled"; 217 status = "disabled";
219 }; 218 };
219 eth0: ethernet@10090000 {
220 compatible = "sifive,fu540-c000-gem";
221 interrupt-parent = <&plic0>;
222 interrupts = <53>;
223 reg = <0x0 0x10090000 0x0 0x2000
224 0x0 0x100a0000 0x0 0x1000>;
225 local-mac-address = [00 00 00 00 00 00];
226 clock-names = "pclk", "hclk";
227 clocks = <&prci PRCI_CLK_GEMGXLPLL>,
228 <&prci PRCI_CLK_GEMGXLPLL>;
229 #address-cells = <1>;
230 #size-cells = <0>;
231 status = "disabled";
232 };
233
220 }; 234 };
221}; 235};
diff --git a/arch/riscv/boot/dts/sifive/hifive-unleashed-a00.dts b/arch/riscv/boot/dts/sifive/hifive-unleashed-a00.dts
index 0b55c53c08c7..93d68cbd64fe 100644
--- a/arch/riscv/boot/dts/sifive/hifive-unleashed-a00.dts
+++ b/arch/riscv/boot/dts/sifive/hifive-unleashed-a00.dts
@@ -76,3 +76,12 @@
76 disable-wp; 76 disable-wp;
77 }; 77 };
78}; 78};
79
80&eth0 {
81 status = "okay";
82 phy-mode = "gmii";
83 phy-handle = <&phy0>;
84 phy0: ethernet-phy@0 {
85 reg = <0>;
86 };
87};
diff --git a/arch/riscv/configs/defconfig b/arch/riscv/configs/defconfig
index b7b749b18853..3efff552a261 100644
--- a/arch/riscv/configs/defconfig
+++ b/arch/riscv/configs/defconfig
@@ -34,6 +34,7 @@ CONFIG_PCIEPORTBUS=y
34CONFIG_PCI_HOST_GENERIC=y 34CONFIG_PCI_HOST_GENERIC=y
35CONFIG_PCIE_XILINX=y 35CONFIG_PCIE_XILINX=y
36CONFIG_DEVTMPFS=y 36CONFIG_DEVTMPFS=y
37CONFIG_DEVTMPFS_MOUNT=y
37CONFIG_BLK_DEV_LOOP=y 38CONFIG_BLK_DEV_LOOP=y
38CONFIG_VIRTIO_BLK=y 39CONFIG_VIRTIO_BLK=y
39CONFIG_BLK_DEV_SD=y 40CONFIG_BLK_DEV_SD=y
@@ -53,6 +54,10 @@ CONFIG_SERIAL_8250_CONSOLE=y
53CONFIG_SERIAL_OF_PLATFORM=y 54CONFIG_SERIAL_OF_PLATFORM=y
54CONFIG_SERIAL_EARLYCON_RISCV_SBI=y 55CONFIG_SERIAL_EARLYCON_RISCV_SBI=y
55CONFIG_HVC_RISCV_SBI=y 56CONFIG_HVC_RISCV_SBI=y
57CONFIG_HW_RANDOM=y
58CONFIG_HW_RANDOM_VIRTIO=y
59CONFIG_SPI=y
60CONFIG_SPI_SIFIVE=y
56# CONFIG_PTP_1588_CLOCK is not set 61# CONFIG_PTP_1588_CLOCK is not set
57CONFIG_DRM=y 62CONFIG_DRM=y
58CONFIG_DRM_RADEON=y 63CONFIG_DRM_RADEON=y
@@ -66,8 +71,9 @@ CONFIG_USB_OHCI_HCD=y
66CONFIG_USB_OHCI_HCD_PLATFORM=y 71CONFIG_USB_OHCI_HCD_PLATFORM=y
67CONFIG_USB_STORAGE=y 72CONFIG_USB_STORAGE=y
68CONFIG_USB_UAS=y 73CONFIG_USB_UAS=y
74CONFIG_MMC=y
75CONFIG_MMC_SPI=y
69CONFIG_VIRTIO_MMIO=y 76CONFIG_VIRTIO_MMIO=y
70CONFIG_SPI_SIFIVE=y
71CONFIG_EXT4_FS=y 77CONFIG_EXT4_FS=y
72CONFIG_EXT4_FS_POSIX_ACL=y 78CONFIG_EXT4_FS_POSIX_ACL=y
73CONFIG_AUTOFS4_FS=y 79CONFIG_AUTOFS4_FS=y
@@ -83,8 +89,4 @@ CONFIG_ROOT_NFS=y
83CONFIG_CRYPTO_USER_API_HASH=y 89CONFIG_CRYPTO_USER_API_HASH=y
84CONFIG_CRYPTO_DEV_VIRTIO=y 90CONFIG_CRYPTO_DEV_VIRTIO=y
85CONFIG_PRINTK_TIME=y 91CONFIG_PRINTK_TIME=y
86CONFIG_SPI=y
87CONFIG_MMC_SPI=y
88CONFIG_MMC=y
89CONFIG_DEVTMPFS_MOUNT=y
90# CONFIG_RCU_TRACE is not set 92# CONFIG_RCU_TRACE is not set
diff --git a/arch/riscv/configs/rv32_defconfig b/arch/riscv/configs/rv32_defconfig
index d5449ef805a3..7da93e494445 100644
--- a/arch/riscv/configs/rv32_defconfig
+++ b/arch/riscv/configs/rv32_defconfig
@@ -34,6 +34,7 @@ CONFIG_PCIEPORTBUS=y
34CONFIG_PCI_HOST_GENERIC=y 34CONFIG_PCI_HOST_GENERIC=y
35CONFIG_PCIE_XILINX=y 35CONFIG_PCIE_XILINX=y
36CONFIG_DEVTMPFS=y 36CONFIG_DEVTMPFS=y
37CONFIG_DEVTMPFS_MOUNT=y
37CONFIG_BLK_DEV_LOOP=y 38CONFIG_BLK_DEV_LOOP=y
38CONFIG_VIRTIO_BLK=y 39CONFIG_VIRTIO_BLK=y
39CONFIG_BLK_DEV_SD=y 40CONFIG_BLK_DEV_SD=y
@@ -53,6 +54,8 @@ CONFIG_SERIAL_8250_CONSOLE=y
53CONFIG_SERIAL_OF_PLATFORM=y 54CONFIG_SERIAL_OF_PLATFORM=y
54CONFIG_SERIAL_EARLYCON_RISCV_SBI=y 55CONFIG_SERIAL_EARLYCON_RISCV_SBI=y
55CONFIG_HVC_RISCV_SBI=y 56CONFIG_HVC_RISCV_SBI=y
57CONFIG_HW_RANDOM=y
58CONFIG_HW_RANDOM_VIRTIO=y
56# CONFIG_PTP_1588_CLOCK is not set 59# CONFIG_PTP_1588_CLOCK is not set
57CONFIG_DRM=y 60CONFIG_DRM=y
58CONFIG_DRM_RADEON=y 61CONFIG_DRM_RADEON=y
diff --git a/arch/riscv/include/asm/Kbuild b/arch/riscv/include/asm/Kbuild
index 1efaeddf1e4b..16970f246860 100644
--- a/arch/riscv/include/asm/Kbuild
+++ b/arch/riscv/include/asm/Kbuild
@@ -22,6 +22,7 @@ generic-y += kvm_para.h
22generic-y += local.h 22generic-y += local.h
23generic-y += local64.h 23generic-y += local64.h
24generic-y += mm-arch-hooks.h 24generic-y += mm-arch-hooks.h
25generic-y += msi.h
25generic-y += percpu.h 26generic-y += percpu.h
26generic-y += preempt.h 27generic-y += preempt.h
27generic-y += sections.h 28generic-y += sections.h
diff --git a/arch/riscv/include/asm/fixmap.h b/arch/riscv/include/asm/fixmap.h
index 9c66033c3a54..161f28d04a07 100644
--- a/arch/riscv/include/asm/fixmap.h
+++ b/arch/riscv/include/asm/fixmap.h
@@ -30,10 +30,6 @@ enum fixed_addresses {
30 __end_of_fixed_addresses 30 __end_of_fixed_addresses
31}; 31};
32 32
33#define FIXADDR_SIZE (__end_of_fixed_addresses * PAGE_SIZE)
34#define FIXADDR_TOP (VMALLOC_START)
35#define FIXADDR_START (FIXADDR_TOP - FIXADDR_SIZE)
36
37#define FIXMAP_PAGE_IO PAGE_KERNEL 33#define FIXMAP_PAGE_IO PAGE_KERNEL
38 34
39#define __early_set_fixmap __set_fixmap 35#define __early_set_fixmap __set_fixmap
diff --git a/arch/riscv/include/asm/pgtable.h b/arch/riscv/include/asm/pgtable.h
index a364aba23d55..c24a083b3e12 100644
--- a/arch/riscv/include/asm/pgtable.h
+++ b/arch/riscv/include/asm/pgtable.h
@@ -420,14 +420,22 @@ static inline void pgtable_cache_init(void)
420#define VMALLOC_END (PAGE_OFFSET - 1) 420#define VMALLOC_END (PAGE_OFFSET - 1)
421#define VMALLOC_START (PAGE_OFFSET - VMALLOC_SIZE) 421#define VMALLOC_START (PAGE_OFFSET - VMALLOC_SIZE)
422 422
423#define FIXADDR_TOP VMALLOC_START
424#ifdef CONFIG_64BIT
425#define FIXADDR_SIZE PMD_SIZE
426#else
427#define FIXADDR_SIZE PGDIR_SIZE
428#endif
429#define FIXADDR_START (FIXADDR_TOP - FIXADDR_SIZE)
430
423/* 431/*
424 * Task size is 0x4000000000 for RV64 or 0xb800000 for RV32. 432 * Task size is 0x4000000000 for RV64 or 0x9fc00000 for RV32.
425 * Note that PGDIR_SIZE must evenly divide TASK_SIZE. 433 * Note that PGDIR_SIZE must evenly divide TASK_SIZE.
426 */ 434 */
427#ifdef CONFIG_64BIT 435#ifdef CONFIG_64BIT
428#define TASK_SIZE (PGDIR_SIZE * PTRS_PER_PGD / 2) 436#define TASK_SIZE (PGDIR_SIZE * PTRS_PER_PGD / 2)
429#else 437#else
430#define TASK_SIZE VMALLOC_START 438#define TASK_SIZE FIXADDR_START
431#endif 439#endif
432 440
433#include <asm-generic/pgtable.h> 441#include <asm-generic/pgtable.h>
diff --git a/arch/riscv/include/asm/switch_to.h b/arch/riscv/include/asm/switch_to.h
index 853b65ef656d..f0227bdce0f0 100644
--- a/arch/riscv/include/asm/switch_to.h
+++ b/arch/riscv/include/asm/switch_to.h
@@ -16,7 +16,13 @@ extern void __fstate_restore(struct task_struct *restore_from);
16 16
17static inline void __fstate_clean(struct pt_regs *regs) 17static inline void __fstate_clean(struct pt_regs *regs)
18{ 18{
19 regs->sstatus |= (regs->sstatus & ~(SR_FS)) | SR_FS_CLEAN; 19 regs->sstatus = (regs->sstatus & ~SR_FS) | SR_FS_CLEAN;
20}
21
22static inline void fstate_off(struct task_struct *task,
23 struct pt_regs *regs)
24{
25 regs->sstatus = (regs->sstatus & ~SR_FS) | SR_FS_OFF;
20} 26}
21 27
22static inline void fstate_save(struct task_struct *task, 28static inline void fstate_save(struct task_struct *task,
diff --git a/arch/riscv/include/asm/tlbflush.h b/arch/riscv/include/asm/tlbflush.h
index 687dd19735a7..4d9bbe8438bf 100644
--- a/arch/riscv/include/asm/tlbflush.h
+++ b/arch/riscv/include/asm/tlbflush.h
@@ -53,10 +53,17 @@ static inline void remote_sfence_vma(struct cpumask *cmask, unsigned long start,
53} 53}
54 54
55#define flush_tlb_all() sbi_remote_sfence_vma(NULL, 0, -1) 55#define flush_tlb_all() sbi_remote_sfence_vma(NULL, 0, -1)
56#define flush_tlb_page(vma, addr) flush_tlb_range(vma, addr, 0) 56
57#define flush_tlb_range(vma, start, end) \ 57#define flush_tlb_range(vma, start, end) \
58 remote_sfence_vma(mm_cpumask((vma)->vm_mm), start, (end) - (start)) 58 remote_sfence_vma(mm_cpumask((vma)->vm_mm), start, (end) - (start))
59#define flush_tlb_mm(mm) \ 59
60static inline void flush_tlb_page(struct vm_area_struct *vma,
61 unsigned long addr)
62{
63 flush_tlb_range(vma, addr, addr + PAGE_SIZE);
64}
65
66#define flush_tlb_mm(mm) \
60 remote_sfence_vma(mm_cpumask(mm), 0, -1) 67 remote_sfence_vma(mm_cpumask(mm), 0, -1)
61 68
62#endif /* CONFIG_SMP */ 69#endif /* CONFIG_SMP */
diff --git a/arch/riscv/include/uapi/asm/auxvec.h b/arch/riscv/include/uapi/asm/auxvec.h
index 62716653554b..d86cb17bbabe 100644
--- a/arch/riscv/include/uapi/asm/auxvec.h
+++ b/arch/riscv/include/uapi/asm/auxvec.h
@@ -1,4 +1,4 @@
1/* SPDX-License-Identifier: GPL-2.0-only */ 1/* SPDX-License-Identifier: GPL-2.0-only WITH Linux-syscall-note */
2/* 2/*
3 * Copyright (C) 2012 ARM Ltd. 3 * Copyright (C) 2012 ARM Ltd.
4 * Copyright (C) 2015 Regents of the University of California 4 * Copyright (C) 2015 Regents of the University of California
diff --git a/arch/riscv/include/uapi/asm/bitsperlong.h b/arch/riscv/include/uapi/asm/bitsperlong.h
index 0b9b58b57ff6..7d0b32e3b701 100644
--- a/arch/riscv/include/uapi/asm/bitsperlong.h
+++ b/arch/riscv/include/uapi/asm/bitsperlong.h
@@ -1,4 +1,4 @@
1/* SPDX-License-Identifier: GPL-2.0-only */ 1/* SPDX-License-Identifier: GPL-2.0-only WITH Linux-syscall-note */
2/* 2/*
3 * Copyright (C) 2012 ARM Ltd. 3 * Copyright (C) 2012 ARM Ltd.
4 * Copyright (C) 2015 Regents of the University of California 4 * Copyright (C) 2015 Regents of the University of California
diff --git a/arch/riscv/include/uapi/asm/byteorder.h b/arch/riscv/include/uapi/asm/byteorder.h
index 1920debc09c0..f671e16bf6af 100644
--- a/arch/riscv/include/uapi/asm/byteorder.h
+++ b/arch/riscv/include/uapi/asm/byteorder.h
@@ -1,4 +1,4 @@
1/* SPDX-License-Identifier: GPL-2.0-only */ 1/* SPDX-License-Identifier: GPL-2.0-only WITH Linux-syscall-note */
2/* 2/*
3 * Copyright (C) 2012 ARM Ltd. 3 * Copyright (C) 2012 ARM Ltd.
4 * Copyright (C) 2015 Regents of the University of California 4 * Copyright (C) 2015 Regents of the University of California
diff --git a/arch/riscv/include/uapi/asm/hwcap.h b/arch/riscv/include/uapi/asm/hwcap.h
index 7d786145183b..4e7646077056 100644
--- a/arch/riscv/include/uapi/asm/hwcap.h
+++ b/arch/riscv/include/uapi/asm/hwcap.h
@@ -1,4 +1,4 @@
1/* SPDX-License-Identifier: GPL-2.0-only */ 1/* SPDX-License-Identifier: GPL-2.0-only WITH Linux-syscall-note */
2/* 2/*
3 * Copied from arch/arm64/include/asm/hwcap.h 3 * Copied from arch/arm64/include/asm/hwcap.h
4 * 4 *
diff --git a/arch/riscv/include/uapi/asm/ptrace.h b/arch/riscv/include/uapi/asm/ptrace.h
index 92d8f7cd8f84..882547f6bd5c 100644
--- a/arch/riscv/include/uapi/asm/ptrace.h
+++ b/arch/riscv/include/uapi/asm/ptrace.h
@@ -1,4 +1,4 @@
1/* SPDX-License-Identifier: GPL-2.0-only */ 1/* SPDX-License-Identifier: GPL-2.0-only WITH Linux-syscall-note */
2/* 2/*
3 * Copyright (C) 2012 Regents of the University of California 3 * Copyright (C) 2012 Regents of the University of California
4 */ 4 */
diff --git a/arch/riscv/include/uapi/asm/sigcontext.h b/arch/riscv/include/uapi/asm/sigcontext.h
index 053f809e52ce..84f2dfcfdbce 100644
--- a/arch/riscv/include/uapi/asm/sigcontext.h
+++ b/arch/riscv/include/uapi/asm/sigcontext.h
@@ -1,4 +1,4 @@
1/* SPDX-License-Identifier: GPL-2.0-only */ 1/* SPDX-License-Identifier: GPL-2.0-only WITH Linux-syscall-note */
2/* 2/*
3 * Copyright (C) 2012 Regents of the University of California 3 * Copyright (C) 2012 Regents of the University of California
4 */ 4 */
diff --git a/arch/riscv/include/uapi/asm/ucontext.h b/arch/riscv/include/uapi/asm/ucontext.h
index b58e00cee2ec..411dd7b52ed6 100644
--- a/arch/riscv/include/uapi/asm/ucontext.h
+++ b/arch/riscv/include/uapi/asm/ucontext.h
@@ -1,4 +1,4 @@
1/* SPDX-License-Identifier: GPL-2.0-only */ 1/* SPDX-License-Identifier: GPL-2.0-only WITH Linux-syscall-note */
2/* 2/*
3 * Copyright (C) 2012 ARM Ltd. 3 * Copyright (C) 2012 ARM Ltd.
4 * Copyright (C) 2017 SiFive, Inc. 4 * Copyright (C) 2017 SiFive, Inc.
diff --git a/arch/riscv/include/uapi/asm/unistd.h b/arch/riscv/include/uapi/asm/unistd.h
index 0e2eeeb1fd27..13ce76cc5aff 100644
--- a/arch/riscv/include/uapi/asm/unistd.h
+++ b/arch/riscv/include/uapi/asm/unistd.h
@@ -18,6 +18,7 @@
18#ifdef __LP64__ 18#ifdef __LP64__
19#define __ARCH_WANT_NEW_STAT 19#define __ARCH_WANT_NEW_STAT
20#define __ARCH_WANT_SET_GET_RLIMIT 20#define __ARCH_WANT_SET_GET_RLIMIT
21#define __ARCH_WANT_SYS_CLONE3
21#endif /* __LP64__ */ 22#endif /* __LP64__ */
22 23
23#include <asm-generic/unistd.h> 24#include <asm-generic/unistd.h>
diff --git a/arch/riscv/kernel/process.c b/arch/riscv/kernel/process.c
index f23794bd1e90..fb3a082362eb 100644
--- a/arch/riscv/kernel/process.c
+++ b/arch/riscv/kernel/process.c
@@ -64,8 +64,14 @@ void start_thread(struct pt_regs *regs, unsigned long pc,
64 unsigned long sp) 64 unsigned long sp)
65{ 65{
66 regs->sstatus = SR_SPIE; 66 regs->sstatus = SR_SPIE;
67 if (has_fpu) 67 if (has_fpu) {
68 regs->sstatus |= SR_FS_INITIAL; 68 regs->sstatus |= SR_FS_INITIAL;
69 /*
70 * Restore the initial value to the FP register
71 * before starting the user program.
72 */
73 fstate_restore(current, regs);
74 }
69 regs->sepc = pc; 75 regs->sepc = pc;
70 regs->sp = sp; 76 regs->sp = sp;
71 set_fs(USER_DS); 77 set_fs(USER_DS);
@@ -75,10 +81,11 @@ void flush_thread(void)
75{ 81{
76#ifdef CONFIG_FPU 82#ifdef CONFIG_FPU
77 /* 83 /*
78 * Reset FPU context 84 * Reset FPU state and context
79 * frm: round to nearest, ties to even (IEEE default) 85 * frm: round to nearest, ties to even (IEEE default)
80 * fflags: accrued exceptions cleared 86 * fflags: accrued exceptions cleared
81 */ 87 */
88 fstate_off(current, task_pt_regs(current));
82 memset(&current->thread.fstate, 0, sizeof(current->thread.fstate)); 89 memset(&current->thread.fstate, 0, sizeof(current->thread.fstate));
83#endif 90#endif
84} 91}
diff --git a/arch/riscv/kernel/vdso/Makefile b/arch/riscv/kernel/vdso/Makefile
index f1d6ffe43e42..49a5852fd07d 100644
--- a/arch/riscv/kernel/vdso/Makefile
+++ b/arch/riscv/kernel/vdso/Makefile
@@ -37,7 +37,7 @@ $(obj)/vdso.so.dbg: $(src)/vdso.lds $(obj-vdso) FORCE
37# these symbols in the kernel code rather than hand-coded addresses. 37# these symbols in the kernel code rather than hand-coded addresses.
38 38
39SYSCFLAGS_vdso.so.dbg = -shared -s -Wl,-soname=linux-vdso.so.1 \ 39SYSCFLAGS_vdso.so.dbg = -shared -s -Wl,-soname=linux-vdso.so.1 \
40 -Wl,--hash-style=both 40 -Wl,--build-id -Wl,--hash-style=both
41$(obj)/vdso-dummy.o: $(src)/vdso.lds $(obj)/rt_sigreturn.o FORCE 41$(obj)/vdso-dummy.o: $(src)/vdso.lds $(obj)/rt_sigreturn.o FORCE
42 $(call if_changed,vdsold) 42 $(call if_changed,vdsold)
43 43
diff --git a/arch/riscv/lib/Makefile b/arch/riscv/lib/Makefile
index 8e364ebf37de..267feaa10f6a 100644
--- a/arch/riscv/lib/Makefile
+++ b/arch/riscv/lib/Makefile
@@ -5,5 +5,3 @@ lib-y += memset.o
5lib-y += uaccess.o 5lib-y += uaccess.o
6 6
7lib-$(CONFIG_64BIT) += tishift.o 7lib-$(CONFIG_64BIT) += tishift.o
8
9lib-$(CONFIG_32BIT) += udivdi3.o
diff --git a/arch/riscv/lib/delay.c b/arch/riscv/lib/delay.c
index 87ff89e88f2c..f51c9a03bca1 100644
--- a/arch/riscv/lib/delay.c
+++ b/arch/riscv/lib/delay.c
@@ -81,9 +81,13 @@ EXPORT_SYMBOL(__delay);
81void udelay(unsigned long usecs) 81void udelay(unsigned long usecs)
82{ 82{
83 u64 ucycles = (u64)usecs * lpj_fine * UDELAY_MULT; 83 u64 ucycles = (u64)usecs * lpj_fine * UDELAY_MULT;
84 u64 n;
84 85
85 if (unlikely(usecs > MAX_UDELAY_US)) { 86 if (unlikely(usecs > MAX_UDELAY_US)) {
86 __delay((u64)usecs * riscv_timebase / 1000000ULL); 87 n = (u64)usecs * riscv_timebase;
88 do_div(n, 1000000);
89
90 __delay(n);
87 return; 91 return;
88 } 92 }
89 93
diff --git a/arch/riscv/lib/udivdi3.S b/arch/riscv/lib/udivdi3.S
deleted file mode 100644
index 3f07476a91a9..000000000000
--- a/arch/riscv/lib/udivdi3.S
+++ /dev/null
@@ -1,32 +0,0 @@
1/* SPDX-License-Identifier: GPL-2.0-only */
2/*
3 * Copyright (C) 2016-2017 Free Software Foundation, Inc.
4 */
5
6#include <linux/linkage.h>
7
8ENTRY(__udivdi3)
9 mv a2, a1
10 mv a1, a0
11 li a0, -1
12 beqz a2, .L5
13 li a3, 1
14 bgeu a2, a1, .L2
15.L1:
16 blez a2, .L2
17 slli a2, a2, 1
18 slli a3, a3, 1
19 bgtu a1, a2, .L1
20.L2:
21 li a0, 0
22.L3:
23 bltu a1, a2, .L4
24 sub a1, a1, a2
25 or a0, a0, a3
26.L4:
27 srli a3, a3, 1
28 srli a2, a2, 1
29 bnez a3, .L3
30.L5:
31 ret
32ENDPROC(__udivdi3)
diff --git a/arch/s390/boot/Makefile b/arch/s390/boot/Makefile
index 7cba96e7587b..4cf0bddb7d92 100644
--- a/arch/s390/boot/Makefile
+++ b/arch/s390/boot/Makefile
@@ -36,7 +36,7 @@ CFLAGS_sclp_early_core.o += -I$(srctree)/drivers/s390/char
36 36
37obj-y := head.o als.o startup.o mem_detect.o ipl_parm.o ipl_report.o 37obj-y := head.o als.o startup.o mem_detect.o ipl_parm.o ipl_report.o
38obj-y += string.o ebcdic.o sclp_early_core.o mem.o ipl_vmparm.o cmdline.o 38obj-y += string.o ebcdic.o sclp_early_core.o mem.o ipl_vmparm.o cmdline.o
39obj-y += ctype.o text_dma.o 39obj-y += version.o ctype.o text_dma.o
40obj-$(CONFIG_PROTECTED_VIRTUALIZATION_GUEST) += uv.o 40obj-$(CONFIG_PROTECTED_VIRTUALIZATION_GUEST) += uv.o
41obj-$(CONFIG_RELOCATABLE) += machine_kexec_reloc.o 41obj-$(CONFIG_RELOCATABLE) += machine_kexec_reloc.o
42obj-$(CONFIG_RANDOMIZE_BASE) += kaslr.o 42obj-$(CONFIG_RANDOMIZE_BASE) += kaslr.o
diff --git a/arch/s390/boot/boot.h b/arch/s390/boot/boot.h
index ad57c2205a71..1c3b2b257637 100644
--- a/arch/s390/boot/boot.h
+++ b/arch/s390/boot/boot.h
@@ -8,10 +8,12 @@ void store_ipl_parmblock(void);
8void setup_boot_command_line(void); 8void setup_boot_command_line(void);
9void parse_boot_command_line(void); 9void parse_boot_command_line(void);
10void setup_memory_end(void); 10void setup_memory_end(void);
11void verify_facilities(void);
11void print_missing_facilities(void); 12void print_missing_facilities(void);
12unsigned long get_random_base(unsigned long safe_addr); 13unsigned long get_random_base(unsigned long safe_addr);
13 14
14extern int kaslr_enabled; 15extern int kaslr_enabled;
16extern const char kernel_version[];
15 17
16unsigned long read_ipl_report(unsigned long safe_offset); 18unsigned long read_ipl_report(unsigned long safe_offset);
17 19
diff --git a/arch/s390/boot/head.S b/arch/s390/boot/head.S
index 028aab03a9e7..2087bed6e60f 100644
--- a/arch/s390/boot/head.S
+++ b/arch/s390/boot/head.S
@@ -361,6 +361,7 @@ ENTRY(startup_kdump)
361 .quad 0 # INITRD_SIZE 361 .quad 0 # INITRD_SIZE
362 .quad 0 # OLDMEM_BASE 362 .quad 0 # OLDMEM_BASE
363 .quad 0 # OLDMEM_SIZE 363 .quad 0 # OLDMEM_SIZE
364 .quad kernel_version # points to kernel version string
364 365
365 .org COMMAND_LINE 366 .org COMMAND_LINE
366 .byte "root=/dev/ram0 ro" 367 .byte "root=/dev/ram0 ro"
diff --git a/arch/s390/boot/ipl_parm.c b/arch/s390/boot/ipl_parm.c
index 3c49bde8aa5e..b8aa6a9f937b 100644
--- a/arch/s390/boot/ipl_parm.c
+++ b/arch/s390/boot/ipl_parm.c
@@ -48,9 +48,7 @@ void store_ipl_parmblock(void)
48{ 48{
49 int rc; 49 int rc;
50 50
51 uv_set_shared(__pa(&ipl_block));
52 rc = __diag308(DIAG308_STORE, &ipl_block); 51 rc = __diag308(DIAG308_STORE, &ipl_block);
53 uv_remove_shared(__pa(&ipl_block));
54 if (rc == DIAG308_RC_OK && 52 if (rc == DIAG308_RC_OK &&
55 ipl_block.hdr.version <= IPL_MAX_SUPPORTED_VERSION) 53 ipl_block.hdr.version <= IPL_MAX_SUPPORTED_VERSION)
56 ipl_block_valid = 1; 54 ipl_block_valid = 1;
diff --git a/arch/s390/boot/kaslr.c b/arch/s390/boot/kaslr.c
index 3bdd8132e56b..c34a6387ce38 100644
--- a/arch/s390/boot/kaslr.c
+++ b/arch/s390/boot/kaslr.c
@@ -7,6 +7,7 @@
7#include <asm/timex.h> 7#include <asm/timex.h>
8#include <asm/sclp.h> 8#include <asm/sclp.h>
9#include "compressed/decompressor.h" 9#include "compressed/decompressor.h"
10#include "boot.h"
10 11
11#define PRNG_MODE_TDES 1 12#define PRNG_MODE_TDES 1
12#define PRNG_MODE_SHA512 2 13#define PRNG_MODE_SHA512 2
diff --git a/arch/s390/boot/version.c b/arch/s390/boot/version.c
new file mode 100644
index 000000000000..d32e58bdda6a
--- /dev/null
+++ b/arch/s390/boot/version.c
@@ -0,0 +1,7 @@
1// SPDX-License-Identifier: GPL-2.0
2#include <generated/utsrelease.h>
3#include <generated/compile.h>
4#include "boot.h"
5
6const char kernel_version[] = UTS_RELEASE
7 " (" LINUX_COMPILE_BY "@" LINUX_COMPILE_HOST ") " UTS_VERSION;
diff --git a/arch/s390/configs/debug_defconfig b/arch/s390/configs/debug_defconfig
index e26d4413d34c..74e78ec5beb6 100644
--- a/arch/s390/configs/debug_defconfig
+++ b/arch/s390/configs/debug_defconfig
@@ -3,6 +3,7 @@ CONFIG_POSIX_MQUEUE=y
3CONFIG_AUDIT=y 3CONFIG_AUDIT=y
4CONFIG_NO_HZ_IDLE=y 4CONFIG_NO_HZ_IDLE=y
5CONFIG_HIGH_RES_TIMERS=y 5CONFIG_HIGH_RES_TIMERS=y
6CONFIG_PREEMPT=y
6CONFIG_BSD_PROCESS_ACCT=y 7CONFIG_BSD_PROCESS_ACCT=y
7CONFIG_BSD_PROCESS_ACCT_V3=y 8CONFIG_BSD_PROCESS_ACCT_V3=y
8CONFIG_TASKSTATS=y 9CONFIG_TASKSTATS=y
@@ -18,55 +19,71 @@ CONFIG_BLK_CGROUP=y
18CONFIG_CFS_BANDWIDTH=y 19CONFIG_CFS_BANDWIDTH=y
19CONFIG_RT_GROUP_SCHED=y 20CONFIG_RT_GROUP_SCHED=y
20CONFIG_CGROUP_PIDS=y 21CONFIG_CGROUP_PIDS=y
22CONFIG_CGROUP_RDMA=y
21CONFIG_CGROUP_FREEZER=y 23CONFIG_CGROUP_FREEZER=y
22CONFIG_CGROUP_HUGETLB=y 24CONFIG_CGROUP_HUGETLB=y
23CONFIG_CPUSETS=y 25CONFIG_CPUSETS=y
24CONFIG_CGROUP_DEVICE=y 26CONFIG_CGROUP_DEVICE=y
25CONFIG_CGROUP_CPUACCT=y 27CONFIG_CGROUP_CPUACCT=y
26CONFIG_CGROUP_PERF=y 28CONFIG_CGROUP_PERF=y
29CONFIG_CGROUP_BPF=y
27CONFIG_NAMESPACES=y 30CONFIG_NAMESPACES=y
28CONFIG_USER_NS=y 31CONFIG_USER_NS=y
32CONFIG_CHECKPOINT_RESTORE=y
29CONFIG_SCHED_AUTOGROUP=y 33CONFIG_SCHED_AUTOGROUP=y
30CONFIG_BLK_DEV_INITRD=y 34CONFIG_BLK_DEV_INITRD=y
31CONFIG_EXPERT=y 35CONFIG_EXPERT=y
32# CONFIG_SYSFS_SYSCALL is not set 36# CONFIG_SYSFS_SYSCALL is not set
33CONFIG_CHECKPOINT_RESTORE=y
34CONFIG_BPF_SYSCALL=y 37CONFIG_BPF_SYSCALL=y
35CONFIG_USERFAULTFD=y 38CONFIG_USERFAULTFD=y
36# CONFIG_COMPAT_BRK is not set 39# CONFIG_COMPAT_BRK is not set
37CONFIG_PROFILING=y 40CONFIG_PROFILING=y
41CONFIG_LIVEPATCH=y
42CONFIG_TUNE_ZEC12=y
43CONFIG_NR_CPUS=512
44CONFIG_NUMA=y
45CONFIG_HZ_100=y
46CONFIG_KEXEC_FILE=y
47CONFIG_EXPOLINE=y
48CONFIG_EXPOLINE_AUTO=y
49CONFIG_CHSC_SCH=y
50CONFIG_VFIO_CCW=m
51CONFIG_VFIO_AP=m
52CONFIG_CRASH_DUMP=y
53CONFIG_HIBERNATION=y
54CONFIG_PM_DEBUG=y
55CONFIG_CMM=m
56CONFIG_APPLDATA_BASE=y
57CONFIG_KVM=m
58CONFIG_VHOST_NET=m
59CONFIG_VHOST_VSOCK=m
38CONFIG_OPROFILE=m 60CONFIG_OPROFILE=m
39CONFIG_KPROBES=y 61CONFIG_KPROBES=y
40CONFIG_JUMP_LABEL=y 62CONFIG_JUMP_LABEL=y
41CONFIG_STATIC_KEYS_SELFTEST=y 63CONFIG_STATIC_KEYS_SELFTEST=y
64CONFIG_REFCOUNT_FULL=y
65CONFIG_LOCK_EVENT_COUNTS=y
42CONFIG_MODULES=y 66CONFIG_MODULES=y
43CONFIG_MODULE_FORCE_LOAD=y 67CONFIG_MODULE_FORCE_LOAD=y
44CONFIG_MODULE_UNLOAD=y 68CONFIG_MODULE_UNLOAD=y
45CONFIG_MODULE_FORCE_UNLOAD=y 69CONFIG_MODULE_FORCE_UNLOAD=y
46CONFIG_MODVERSIONS=y 70CONFIG_MODVERSIONS=y
47CONFIG_MODULE_SRCVERSION_ALL=y 71CONFIG_MODULE_SRCVERSION_ALL=y
72CONFIG_MODULE_SIG=y
73CONFIG_MODULE_SIG_SHA256=y
48CONFIG_BLK_DEV_INTEGRITY=y 74CONFIG_BLK_DEV_INTEGRITY=y
49CONFIG_BLK_DEV_THROTTLING=y 75CONFIG_BLK_DEV_THROTTLING=y
50CONFIG_BLK_WBT=y 76CONFIG_BLK_WBT=y
51CONFIG_BLK_WBT_SQ=y 77CONFIG_BLK_CGROUP_IOLATENCY=y
52CONFIG_PARTITION_ADVANCED=y 78CONFIG_PARTITION_ADVANCED=y
53CONFIG_IBM_PARTITION=y 79CONFIG_IBM_PARTITION=y
54CONFIG_BSD_DISKLABEL=y 80CONFIG_BSD_DISKLABEL=y
55CONFIG_MINIX_SUBPARTITION=y 81CONFIG_MINIX_SUBPARTITION=y
56CONFIG_SOLARIS_X86_PARTITION=y 82CONFIG_SOLARIS_X86_PARTITION=y
57CONFIG_UNIXWARE_DISKLABEL=y 83CONFIG_UNIXWARE_DISKLABEL=y
58CONFIG_CFQ_GROUP_IOSCHED=y 84CONFIG_IOSCHED_BFQ=y
59CONFIG_DEFAULT_DEADLINE=y 85CONFIG_BFQ_GROUP_IOSCHED=y
60CONFIG_LIVEPATCH=y 86CONFIG_BINFMT_MISC=m
61CONFIG_TUNE_ZEC12=y
62CONFIG_NR_CPUS=512
63CONFIG_NUMA=y
64CONFIG_PREEMPT=y
65CONFIG_HZ_100=y
66CONFIG_KEXEC_FILE=y
67CONFIG_KEXEC_VERIFY_SIG=y
68CONFIG_EXPOLINE=y
69CONFIG_EXPOLINE_AUTO=y
70CONFIG_MEMORY_HOTPLUG=y 87CONFIG_MEMORY_HOTPLUG=y
71CONFIG_MEMORY_HOTREMOVE=y 88CONFIG_MEMORY_HOTREMOVE=y
72CONFIG_KSM=y 89CONFIG_KSM=y
@@ -82,17 +99,8 @@ CONFIG_ZSMALLOC=m
82CONFIG_ZSMALLOC_STAT=y 99CONFIG_ZSMALLOC_STAT=y
83CONFIG_DEFERRED_STRUCT_PAGE_INIT=y 100CONFIG_DEFERRED_STRUCT_PAGE_INIT=y
84CONFIG_IDLE_PAGE_TRACKING=y 101CONFIG_IDLE_PAGE_TRACKING=y
85CONFIG_PCI=y 102CONFIG_PERCPU_STATS=y
86CONFIG_PCI_DEBUG=y 103CONFIG_GUP_BENCHMARK=y
87CONFIG_HOTPLUG_PCI=y
88CONFIG_HOTPLUG_PCI_S390=y
89CONFIG_CHSC_SCH=y
90CONFIG_VFIO_AP=m
91CONFIG_VFIO_CCW=m
92CONFIG_CRASH_DUMP=y
93CONFIG_BINFMT_MISC=m
94CONFIG_HIBERNATION=y
95CONFIG_PM_DEBUG=y
96CONFIG_NET=y 104CONFIG_NET=y
97CONFIG_PACKET=y 105CONFIG_PACKET=y
98CONFIG_PACKET_DIAG=m 106CONFIG_PACKET_DIAG=m
@@ -121,9 +129,6 @@ CONFIG_NET_IPVTI=m
121CONFIG_INET_AH=m 129CONFIG_INET_AH=m
122CONFIG_INET_ESP=m 130CONFIG_INET_ESP=m
123CONFIG_INET_IPCOMP=m 131CONFIG_INET_IPCOMP=m
124CONFIG_INET_XFRM_MODE_TRANSPORT=m
125CONFIG_INET_XFRM_MODE_TUNNEL=m
126CONFIG_INET_XFRM_MODE_BEET=m
127CONFIG_INET_DIAG=m 132CONFIG_INET_DIAG=m
128CONFIG_INET_UDP_DIAG=m 133CONFIG_INET_UDP_DIAG=m
129CONFIG_TCP_CONG_ADVANCED=y 134CONFIG_TCP_CONG_ADVANCED=y
@@ -139,10 +144,6 @@ CONFIG_INET6_AH=m
139CONFIG_INET6_ESP=m 144CONFIG_INET6_ESP=m
140CONFIG_INET6_IPCOMP=m 145CONFIG_INET6_IPCOMP=m
141CONFIG_IPV6_MIP6=m 146CONFIG_IPV6_MIP6=m
142CONFIG_INET6_XFRM_MODE_TRANSPORT=m
143CONFIG_INET6_XFRM_MODE_TUNNEL=m
144CONFIG_INET6_XFRM_MODE_BEET=m
145CONFIG_INET6_XFRM_MODE_ROUTEOPTIMIZATION=m
146CONFIG_IPV6_VTI=m 147CONFIG_IPV6_VTI=m
147CONFIG_IPV6_SIT=m 148CONFIG_IPV6_SIT=m
148CONFIG_IPV6_GRE=m 149CONFIG_IPV6_GRE=m
@@ -264,11 +265,8 @@ CONFIG_IP_VS_SED=m
264CONFIG_IP_VS_NQ=m 265CONFIG_IP_VS_NQ=m
265CONFIG_IP_VS_FTP=m 266CONFIG_IP_VS_FTP=m
266CONFIG_IP_VS_PE_SIP=m 267CONFIG_IP_VS_PE_SIP=m
267CONFIG_NF_CONNTRACK_IPV4=m
268CONFIG_NF_TABLES_IPV4=y 268CONFIG_NF_TABLES_IPV4=y
269CONFIG_NFT_CHAIN_ROUTE_IPV4=m
270CONFIG_NF_TABLES_ARP=y 269CONFIG_NF_TABLES_ARP=y
271CONFIG_NFT_CHAIN_NAT_IPV4=m
272CONFIG_IP_NF_IPTABLES=m 270CONFIG_IP_NF_IPTABLES=m
273CONFIG_IP_NF_MATCH_AH=m 271CONFIG_IP_NF_MATCH_AH=m
274CONFIG_IP_NF_MATCH_ECN=m 272CONFIG_IP_NF_MATCH_ECN=m
@@ -287,10 +285,7 @@ CONFIG_IP_NF_SECURITY=m
287CONFIG_IP_NF_ARPTABLES=m 285CONFIG_IP_NF_ARPTABLES=m
288CONFIG_IP_NF_ARPFILTER=m 286CONFIG_IP_NF_ARPFILTER=m
289CONFIG_IP_NF_ARP_MANGLE=m 287CONFIG_IP_NF_ARP_MANGLE=m
290CONFIG_NF_CONNTRACK_IPV6=m
291CONFIG_NF_TABLES_IPV6=y 288CONFIG_NF_TABLES_IPV6=y
292CONFIG_NFT_CHAIN_ROUTE_IPV6=m
293CONFIG_NFT_CHAIN_NAT_IPV6=m
294CONFIG_IP6_NF_IPTABLES=m 289CONFIG_IP6_NF_IPTABLES=m
295CONFIG_IP6_NF_MATCH_AH=m 290CONFIG_IP6_NF_MATCH_AH=m
296CONFIG_IP6_NF_MATCH_EUI64=m 291CONFIG_IP6_NF_MATCH_EUI64=m
@@ -309,7 +304,7 @@ CONFIG_IP6_NF_RAW=m
309CONFIG_IP6_NF_SECURITY=m 304CONFIG_IP6_NF_SECURITY=m
310CONFIG_IP6_NF_NAT=m 305CONFIG_IP6_NF_NAT=m
311CONFIG_IP6_NF_TARGET_MASQUERADE=m 306CONFIG_IP6_NF_TARGET_MASQUERADE=m
312CONFIG_NF_TABLES_BRIDGE=y 307CONFIG_NF_TABLES_BRIDGE=m
313CONFIG_RDS=m 308CONFIG_RDS=m
314CONFIG_RDS_RDMA=m 309CONFIG_RDS_RDMA=m
315CONFIG_RDS_TCP=m 310CONFIG_RDS_TCP=m
@@ -375,9 +370,11 @@ CONFIG_NETLINK_DIAG=m
375CONFIG_CGROUP_NET_PRIO=y 370CONFIG_CGROUP_NET_PRIO=y
376CONFIG_BPF_JIT=y 371CONFIG_BPF_JIT=y
377CONFIG_NET_PKTGEN=m 372CONFIG_NET_PKTGEN=m
373CONFIG_PCI=y
374CONFIG_PCI_DEBUG=y
375CONFIG_HOTPLUG_PCI=y
376CONFIG_HOTPLUG_PCI_S390=y
378CONFIG_DEVTMPFS=y 377CONFIG_DEVTMPFS=y
379CONFIG_DMA_CMA=y
380CONFIG_CMA_SIZE_MBYTES=0
381CONFIG_CONNECTOR=y 378CONFIG_CONNECTOR=y
382CONFIG_ZRAM=m 379CONFIG_ZRAM=m
383CONFIG_BLK_DEV_LOOP=m 380CONFIG_BLK_DEV_LOOP=m
@@ -395,7 +392,6 @@ CONFIG_RAID_ATTRS=m
395CONFIG_SCSI=y 392CONFIG_SCSI=y
396CONFIG_BLK_DEV_SD=y 393CONFIG_BLK_DEV_SD=y
397CONFIG_CHR_DEV_ST=m 394CONFIG_CHR_DEV_ST=m
398CONFIG_CHR_DEV_OSST=m
399CONFIG_BLK_DEV_SR=m 395CONFIG_BLK_DEV_SR=m
400CONFIG_CHR_DEV_SG=y 396CONFIG_CHR_DEV_SG=y
401CONFIG_CHR_DEV_SCH=m 397CONFIG_CHR_DEV_SCH=m
@@ -415,17 +411,19 @@ CONFIG_SCSI_DH_RDAC=m
415CONFIG_SCSI_DH_HP_SW=m 411CONFIG_SCSI_DH_HP_SW=m
416CONFIG_SCSI_DH_EMC=m 412CONFIG_SCSI_DH_EMC=m
417CONFIG_SCSI_DH_ALUA=m 413CONFIG_SCSI_DH_ALUA=m
418CONFIG_SCSI_OSD_INITIATOR=m
419CONFIG_SCSI_OSD_ULD=m
420CONFIG_MD=y 414CONFIG_MD=y
421CONFIG_BLK_DEV_MD=y 415CONFIG_BLK_DEV_MD=y
422CONFIG_MD_LINEAR=m 416CONFIG_MD_LINEAR=m
423CONFIG_MD_MULTIPATH=m 417CONFIG_MD_MULTIPATH=m
424CONFIG_MD_FAULTY=m 418CONFIG_MD_FAULTY=m
419CONFIG_MD_CLUSTER=m
420CONFIG_BCACHE=m
425CONFIG_BLK_DEV_DM=m 421CONFIG_BLK_DEV_DM=m
422CONFIG_DM_UNSTRIPED=m
426CONFIG_DM_CRYPT=m 423CONFIG_DM_CRYPT=m
427CONFIG_DM_SNAPSHOT=m 424CONFIG_DM_SNAPSHOT=m
428CONFIG_DM_THIN_PROVISIONING=m 425CONFIG_DM_THIN_PROVISIONING=m
426CONFIG_DM_WRITECACHE=m
429CONFIG_DM_MIRROR=m 427CONFIG_DM_MIRROR=m
430CONFIG_DM_LOG_USERSPACE=m 428CONFIG_DM_LOG_USERSPACE=m
431CONFIG_DM_RAID=m 429CONFIG_DM_RAID=m
@@ -445,23 +443,78 @@ CONFIG_EQUALIZER=m
445CONFIG_IFB=m 443CONFIG_IFB=m
446CONFIG_MACVLAN=m 444CONFIG_MACVLAN=m
447CONFIG_MACVTAP=m 445CONFIG_MACVTAP=m
448CONFIG_VXLAN=m
449CONFIG_TUN=m 446CONFIG_TUN=m
450CONFIG_VETH=m 447CONFIG_VETH=m
451CONFIG_VIRTIO_NET=m 448CONFIG_VIRTIO_NET=m
452CONFIG_NLMON=m 449CONFIG_NLMON=m
450# CONFIG_NET_VENDOR_3COM is not set
451# CONFIG_NET_VENDOR_ADAPTEC is not set
452# CONFIG_NET_VENDOR_AGERE is not set
453# CONFIG_NET_VENDOR_ALACRITECH is not set
454# CONFIG_NET_VENDOR_ALTEON is not set
455# CONFIG_NET_VENDOR_AMAZON is not set
456# CONFIG_NET_VENDOR_AMD is not set
457# CONFIG_NET_VENDOR_AQUANTIA is not set
453# CONFIG_NET_VENDOR_ARC is not set 458# CONFIG_NET_VENDOR_ARC is not set
459# CONFIG_NET_VENDOR_ATHEROS is not set
460# CONFIG_NET_VENDOR_AURORA is not set
461# CONFIG_NET_VENDOR_BROADCOM is not set
462# CONFIG_NET_VENDOR_BROCADE is not set
463# CONFIG_NET_VENDOR_CADENCE is not set
464# CONFIG_NET_VENDOR_CAVIUM is not set
454# CONFIG_NET_VENDOR_CHELSIO is not set 465# CONFIG_NET_VENDOR_CHELSIO is not set
466# CONFIG_NET_VENDOR_CISCO is not set
467# CONFIG_NET_VENDOR_CORTINA is not set
468# CONFIG_NET_VENDOR_DEC is not set
469# CONFIG_NET_VENDOR_DLINK is not set
470# CONFIG_NET_VENDOR_EMULEX is not set
471# CONFIG_NET_VENDOR_EZCHIP is not set
472# CONFIG_NET_VENDOR_GOOGLE is not set
473# CONFIG_NET_VENDOR_HP is not set
474# CONFIG_NET_VENDOR_HUAWEI is not set
455# CONFIG_NET_VENDOR_INTEL is not set 475# CONFIG_NET_VENDOR_INTEL is not set
456# CONFIG_NET_VENDOR_MARVELL is not set 476# CONFIG_NET_VENDOR_MARVELL is not set
457CONFIG_MLX4_EN=m 477CONFIG_MLX4_EN=m
458CONFIG_MLX5_CORE=m 478CONFIG_MLX5_CORE=m
459CONFIG_MLX5_CORE_EN=y 479CONFIG_MLX5_CORE_EN=y
480# CONFIG_MLXFW is not set
481# CONFIG_NET_VENDOR_MICREL is not set
482# CONFIG_NET_VENDOR_MICROCHIP is not set
483# CONFIG_NET_VENDOR_MICROSEMI is not set
484# CONFIG_NET_VENDOR_MYRI is not set
460# CONFIG_NET_VENDOR_NATSEMI is not set 485# CONFIG_NET_VENDOR_NATSEMI is not set
486# CONFIG_NET_VENDOR_NETERION is not set
487# CONFIG_NET_VENDOR_NETRONOME is not set
488# CONFIG_NET_VENDOR_NI is not set
489# CONFIG_NET_VENDOR_NVIDIA is not set
490# CONFIG_NET_VENDOR_OKI is not set
491# CONFIG_NET_VENDOR_PACKET_ENGINES is not set
492# CONFIG_NET_VENDOR_QLOGIC is not set
493# CONFIG_NET_VENDOR_QUALCOMM is not set
494# CONFIG_NET_VENDOR_RDC is not set
495# CONFIG_NET_VENDOR_REALTEK is not set
496# CONFIG_NET_VENDOR_RENESAS is not set
497# CONFIG_NET_VENDOR_ROCKER is not set
498# CONFIG_NET_VENDOR_SAMSUNG is not set
499# CONFIG_NET_VENDOR_SEEQ is not set
500# CONFIG_NET_VENDOR_SOLARFLARE is not set
501# CONFIG_NET_VENDOR_SILAN is not set
502# CONFIG_NET_VENDOR_SIS is not set
503# CONFIG_NET_VENDOR_SMSC is not set
504# CONFIG_NET_VENDOR_SOCIONEXT is not set
505# CONFIG_NET_VENDOR_STMICRO is not set
506# CONFIG_NET_VENDOR_SUN is not set
507# CONFIG_NET_VENDOR_SYNOPSYS is not set
508# CONFIG_NET_VENDOR_TEHUTI is not set
509# CONFIG_NET_VENDOR_TI is not set
510# CONFIG_NET_VENDOR_VIA is not set
511# CONFIG_NET_VENDOR_WIZNET is not set
461CONFIG_PPP=m 512CONFIG_PPP=m
462CONFIG_PPP_BSDCOMP=m 513CONFIG_PPP_BSDCOMP=m
463CONFIG_PPP_DEFLATE=m 514CONFIG_PPP_DEFLATE=m
515CONFIG_PPP_FILTER=y
464CONFIG_PPP_MPPE=m 516CONFIG_PPP_MPPE=m
517CONFIG_PPP_MULTILINK=y
465CONFIG_PPPOE=m 518CONFIG_PPPOE=m
466CONFIG_PPTP=m 519CONFIG_PPTP=m
467CONFIG_PPPOL2TP=m 520CONFIG_PPPOL2TP=m
@@ -473,10 +526,13 @@ CONFIG_INPUT_EVDEV=y
473# CONFIG_INPUT_MOUSE is not set 526# CONFIG_INPUT_MOUSE is not set
474# CONFIG_SERIO is not set 527# CONFIG_SERIO is not set
475CONFIG_LEGACY_PTY_COUNT=0 528CONFIG_LEGACY_PTY_COUNT=0
529CONFIG_NULL_TTY=m
476CONFIG_HW_RANDOM_VIRTIO=m 530CONFIG_HW_RANDOM_VIRTIO=m
477CONFIG_RAW_DRIVER=m 531CONFIG_RAW_DRIVER=m
478CONFIG_HANGCHECK_TIMER=m 532CONFIG_HANGCHECK_TIMER=m
479CONFIG_TN3270_FS=y 533CONFIG_TN3270_FS=y
534CONFIG_PPS=m
535# CONFIG_PTP_1588_CLOCK is not set
480# CONFIG_HWMON is not set 536# CONFIG_HWMON is not set
481CONFIG_WATCHDOG=y 537CONFIG_WATCHDOG=y
482CONFIG_WATCHDOG_NOWAYOUT=y 538CONFIG_WATCHDOG_NOWAYOUT=y
@@ -498,8 +554,8 @@ CONFIG_VFIO_MDEV_DEVICE=m
498CONFIG_VIRTIO_PCI=m 554CONFIG_VIRTIO_PCI=m
499CONFIG_VIRTIO_BALLOON=m 555CONFIG_VIRTIO_BALLOON=m
500CONFIG_VIRTIO_INPUT=y 556CONFIG_VIRTIO_INPUT=y
501CONFIG_S390_AP_IOMMU=y
502CONFIG_S390_CCW_IOMMU=y 557CONFIG_S390_CCW_IOMMU=y
558CONFIG_S390_AP_IOMMU=y
503CONFIG_EXT4_FS=y 559CONFIG_EXT4_FS=y
504CONFIG_EXT4_FS_POSIX_ACL=y 560CONFIG_EXT4_FS_POSIX_ACL=y
505CONFIG_EXT4_FS_SECURITY=y 561CONFIG_EXT4_FS_SECURITY=y
@@ -519,6 +575,7 @@ CONFIG_OCFS2_FS=m
519CONFIG_BTRFS_FS=y 575CONFIG_BTRFS_FS=y
520CONFIG_BTRFS_FS_POSIX_ACL=y 576CONFIG_BTRFS_FS_POSIX_ACL=y
521CONFIG_BTRFS_DEBUG=y 577CONFIG_BTRFS_DEBUG=y
578CONFIG_BTRFS_ASSERT=y
522CONFIG_NILFS2_FS=m 579CONFIG_NILFS2_FS=m
523CONFIG_FS_DAX=y 580CONFIG_FS_DAX=y
524CONFIG_EXPORTFS_BLOCK_OPS=y 581CONFIG_EXPORTFS_BLOCK_OPS=y
@@ -552,8 +609,10 @@ CONFIG_ECRYPT_FS=m
552CONFIG_CRAMFS=m 609CONFIG_CRAMFS=m
553CONFIG_SQUASHFS=m 610CONFIG_SQUASHFS=m
554CONFIG_SQUASHFS_XATTR=y 611CONFIG_SQUASHFS_XATTR=y
612CONFIG_SQUASHFS_LZ4=y
555CONFIG_SQUASHFS_LZO=y 613CONFIG_SQUASHFS_LZO=y
556CONFIG_SQUASHFS_XZ=y 614CONFIG_SQUASHFS_XZ=y
615CONFIG_SQUASHFS_ZSTD=y
557CONFIG_ROMFS_FS=m 616CONFIG_ROMFS_FS=m
558CONFIG_NFS_FS=m 617CONFIG_NFS_FS=m
559CONFIG_NFS_V3_ACL=y 618CONFIG_NFS_V3_ACL=y
@@ -564,7 +623,6 @@ CONFIG_NFSD_V3_ACL=y
564CONFIG_NFSD_V4=y 623CONFIG_NFSD_V4=y
565CONFIG_NFSD_V4_SECURITY_LABEL=y 624CONFIG_NFSD_V4_SECURITY_LABEL=y
566CONFIG_CIFS=m 625CONFIG_CIFS=m
567CONFIG_CIFS_STATS=y
568CONFIG_CIFS_STATS2=y 626CONFIG_CIFS_STATS2=y
569CONFIG_CIFS_WEAK_PW_HASH=y 627CONFIG_CIFS_WEAK_PW_HASH=y
570CONFIG_CIFS_UPCALL=y 628CONFIG_CIFS_UPCALL=y
@@ -580,19 +638,112 @@ CONFIG_NLS_ISO8859_1=m
580CONFIG_NLS_ISO8859_15=m 638CONFIG_NLS_ISO8859_15=m
581CONFIG_NLS_UTF8=m 639CONFIG_NLS_UTF8=m
582CONFIG_DLM=m 640CONFIG_DLM=m
641CONFIG_UNICODE=y
642CONFIG_PERSISTENT_KEYRINGS=y
643CONFIG_BIG_KEYS=y
644CONFIG_ENCRYPTED_KEYS=m
645CONFIG_SECURITY=y
646CONFIG_SECURITY_NETWORK=y
647CONFIG_FORTIFY_SOURCE=y
648CONFIG_SECURITY_SELINUX=y
649CONFIG_SECURITY_SELINUX_BOOTPARAM=y
650CONFIG_SECURITY_SELINUX_DISABLE=y
651CONFIG_INTEGRITY_SIGNATURE=y
652CONFIG_INTEGRITY_ASYMMETRIC_KEYS=y
653CONFIG_IMA=y
654CONFIG_IMA_DEFAULT_HASH_SHA256=y
655CONFIG_IMA_WRITE_POLICY=y
656CONFIG_IMA_APPRAISE=y
657CONFIG_CRYPTO_USER=m
658# CONFIG_CRYPTO_MANAGER_DISABLE_TESTS is not set
659CONFIG_CRYPTO_PCRYPT=m
660CONFIG_CRYPTO_CRYPTD=m
661CONFIG_CRYPTO_TEST=m
662CONFIG_CRYPTO_DH=m
663CONFIG_CRYPTO_ECDH=m
664CONFIG_CRYPTO_ECRDSA=m
665CONFIG_CRYPTO_CHACHA20POLY1305=m
666CONFIG_CRYPTO_AEGIS128=m
667CONFIG_CRYPTO_AEGIS128L=m
668CONFIG_CRYPTO_AEGIS256=m
669CONFIG_CRYPTO_MORUS640=m
670CONFIG_CRYPTO_MORUS1280=m
671CONFIG_CRYPTO_CFB=m
672CONFIG_CRYPTO_LRW=m
673CONFIG_CRYPTO_PCBC=m
674CONFIG_CRYPTO_KEYWRAP=m
675CONFIG_CRYPTO_ADIANTUM=m
676CONFIG_CRYPTO_XCBC=m
677CONFIG_CRYPTO_VMAC=m
678CONFIG_CRYPTO_CRC32=m
679CONFIG_CRYPTO_XXHASH=m
680CONFIG_CRYPTO_MICHAEL_MIC=m
681CONFIG_CRYPTO_RMD128=m
682CONFIG_CRYPTO_RMD160=m
683CONFIG_CRYPTO_RMD256=m
684CONFIG_CRYPTO_RMD320=m
685CONFIG_CRYPTO_SHA3=m
686CONFIG_CRYPTO_SM3=m
687CONFIG_CRYPTO_TGR192=m
688CONFIG_CRYPTO_WP512=m
689CONFIG_CRYPTO_AES_TI=m
690CONFIG_CRYPTO_ANUBIS=m
691CONFIG_CRYPTO_ARC4=m
692CONFIG_CRYPTO_BLOWFISH=m
693CONFIG_CRYPTO_CAMELLIA=m
694CONFIG_CRYPTO_CAST5=m
695CONFIG_CRYPTO_CAST6=m
696CONFIG_CRYPTO_FCRYPT=m
697CONFIG_CRYPTO_KHAZAD=m
698CONFIG_CRYPTO_SALSA20=m
699CONFIG_CRYPTO_SEED=m
700CONFIG_CRYPTO_SERPENT=m
701CONFIG_CRYPTO_SM4=m
702CONFIG_CRYPTO_TEA=m
703CONFIG_CRYPTO_TWOFISH=m
704CONFIG_CRYPTO_842=m
705CONFIG_CRYPTO_LZ4=m
706CONFIG_CRYPTO_LZ4HC=m
707CONFIG_CRYPTO_ZSTD=m
708CONFIG_CRYPTO_ANSI_CPRNG=m
709CONFIG_CRYPTO_USER_API_HASH=m
710CONFIG_CRYPTO_USER_API_SKCIPHER=m
711CONFIG_CRYPTO_USER_API_RNG=m
712CONFIG_CRYPTO_USER_API_AEAD=m
713CONFIG_CRYPTO_STATS=y
714CONFIG_ZCRYPT=m
715CONFIG_PKEY=m
716CONFIG_CRYPTO_PAES_S390=m
717CONFIG_CRYPTO_SHA1_S390=m
718CONFIG_CRYPTO_SHA256_S390=m
719CONFIG_CRYPTO_SHA512_S390=m
720CONFIG_CRYPTO_DES_S390=m
721CONFIG_CRYPTO_AES_S390=m
722CONFIG_CRYPTO_GHASH_S390=m
723CONFIG_CRYPTO_CRC32_S390=y
724CONFIG_CORDIC=m
725CONFIG_CRC32_SELFTEST=y
726CONFIG_CRC4=m
727CONFIG_CRC7=m
728CONFIG_CRC8=m
729CONFIG_RANDOM32_SELFTEST=y
730CONFIG_DMA_CMA=y
731CONFIG_CMA_SIZE_MBYTES=0
732CONFIG_DMA_API_DEBUG=y
733CONFIG_STRING_SELFTEST=y
583CONFIG_PRINTK_TIME=y 734CONFIG_PRINTK_TIME=y
584CONFIG_DYNAMIC_DEBUG=y 735CONFIG_DYNAMIC_DEBUG=y
585CONFIG_DEBUG_INFO=y 736CONFIG_DEBUG_INFO=y
586CONFIG_DEBUG_INFO_DWARF4=y 737CONFIG_DEBUG_INFO_DWARF4=y
587CONFIG_GDB_SCRIPTS=y 738CONFIG_GDB_SCRIPTS=y
588CONFIG_FRAME_WARN=1024 739CONFIG_FRAME_WARN=1024
589CONFIG_READABLE_ASM=y
590CONFIG_UNUSED_SYMBOLS=y 740CONFIG_UNUSED_SYMBOLS=y
591CONFIG_HEADERS_INSTALL=y 741CONFIG_HEADERS_INSTALL=y
592CONFIG_HEADERS_CHECK=y 742CONFIG_HEADERS_CHECK=y
593CONFIG_DEBUG_SECTION_MISMATCH=y 743CONFIG_DEBUG_SECTION_MISMATCH=y
594CONFIG_MAGIC_SYSRQ=y 744CONFIG_MAGIC_SYSRQ=y
595CONFIG_DEBUG_PAGEALLOC=y 745CONFIG_DEBUG_PAGEALLOC=y
746CONFIG_PAGE_OWNER=y
596CONFIG_DEBUG_RODATA_TEST=y 747CONFIG_DEBUG_RODATA_TEST=y
597CONFIG_DEBUG_OBJECTS=y 748CONFIG_DEBUG_OBJECTS=y
598CONFIG_DEBUG_OBJECTS_SELFTEST=y 749CONFIG_DEBUG_OBJECTS_SELFTEST=y
@@ -645,7 +796,6 @@ CONFIG_STACK_TRACER=y
645CONFIG_BLK_DEV_IO_TRACE=y 796CONFIG_BLK_DEV_IO_TRACE=y
646CONFIG_FUNCTION_PROFILER=y 797CONFIG_FUNCTION_PROFILER=y
647CONFIG_HIST_TRIGGERS=y 798CONFIG_HIST_TRIGGERS=y
648CONFIG_DMA_API_DEBUG=y
649CONFIG_LKDTM=m 799CONFIG_LKDTM=m
650CONFIG_TEST_LIST_SORT=y 800CONFIG_TEST_LIST_SORT=y
651CONFIG_TEST_SORT=y 801CONFIG_TEST_SORT=y
@@ -657,85 +807,3 @@ CONFIG_ATOMIC64_SELFTEST=y
657CONFIG_TEST_BPF=m 807CONFIG_TEST_BPF=m
658CONFIG_BUG_ON_DATA_CORRUPTION=y 808CONFIG_BUG_ON_DATA_CORRUPTION=y
659CONFIG_S390_PTDUMP=y 809CONFIG_S390_PTDUMP=y
660CONFIG_PERSISTENT_KEYRINGS=y
661CONFIG_BIG_KEYS=y
662CONFIG_ENCRYPTED_KEYS=m
663CONFIG_SECURITY=y
664CONFIG_SECURITY_NETWORK=y
665CONFIG_FORTIFY_SOURCE=y
666CONFIG_SECURITY_SELINUX=y
667CONFIG_SECURITY_SELINUX_BOOTPARAM=y
668CONFIG_SECURITY_SELINUX_BOOTPARAM_VALUE=0
669CONFIG_SECURITY_SELINUX_DISABLE=y
670CONFIG_INTEGRITY_SIGNATURE=y
671CONFIG_INTEGRITY_ASYMMETRIC_KEYS=y
672CONFIG_IMA=y
673CONFIG_IMA_DEFAULT_HASH_SHA256=y
674CONFIG_IMA_WRITE_POLICY=y
675CONFIG_IMA_APPRAISE=y
676CONFIG_CRYPTO_DH=m
677CONFIG_CRYPTO_ECDH=m
678CONFIG_CRYPTO_USER=m
679# CONFIG_CRYPTO_MANAGER_DISABLE_TESTS is not set
680CONFIG_CRYPTO_PCRYPT=m
681CONFIG_CRYPTO_CRYPTD=m
682CONFIG_CRYPTO_TEST=m
683CONFIG_CRYPTO_CHACHA20POLY1305=m
684CONFIG_CRYPTO_LRW=m
685CONFIG_CRYPTO_PCBC=m
686CONFIG_CRYPTO_KEYWRAP=m
687CONFIG_CRYPTO_XCBC=m
688CONFIG_CRYPTO_VMAC=m
689CONFIG_CRYPTO_CRC32=m
690CONFIG_CRYPTO_MICHAEL_MIC=m
691CONFIG_CRYPTO_RMD128=m
692CONFIG_CRYPTO_RMD160=m
693CONFIG_CRYPTO_RMD256=m
694CONFIG_CRYPTO_RMD320=m
695CONFIG_CRYPTO_SHA512=m
696CONFIG_CRYPTO_SHA3=m
697CONFIG_CRYPTO_TGR192=m
698CONFIG_CRYPTO_WP512=m
699CONFIG_CRYPTO_AES_TI=m
700CONFIG_CRYPTO_ANUBIS=m
701CONFIG_CRYPTO_BLOWFISH=m
702CONFIG_CRYPTO_CAMELLIA=m
703CONFIG_CRYPTO_CAST5=m
704CONFIG_CRYPTO_CAST6=m
705CONFIG_CRYPTO_FCRYPT=m
706CONFIG_CRYPTO_KHAZAD=m
707CONFIG_CRYPTO_SALSA20=m
708CONFIG_CRYPTO_SEED=m
709CONFIG_CRYPTO_SERPENT=m
710CONFIG_CRYPTO_TEA=m
711CONFIG_CRYPTO_TWOFISH=m
712CONFIG_CRYPTO_842=m
713CONFIG_CRYPTO_LZ4=m
714CONFIG_CRYPTO_LZ4HC=m
715CONFIG_CRYPTO_ANSI_CPRNG=m
716CONFIG_CRYPTO_USER_API_HASH=m
717CONFIG_CRYPTO_USER_API_SKCIPHER=m
718CONFIG_CRYPTO_USER_API_RNG=m
719CONFIG_CRYPTO_USER_API_AEAD=m
720CONFIG_ZCRYPT=m
721CONFIG_PKEY=m
722CONFIG_CRYPTO_PAES_S390=m
723CONFIG_CRYPTO_SHA1_S390=m
724CONFIG_CRYPTO_SHA256_S390=m
725CONFIG_CRYPTO_SHA512_S390=m
726CONFIG_CRYPTO_DES_S390=m
727CONFIG_CRYPTO_AES_S390=m
728CONFIG_CRYPTO_GHASH_S390=m
729CONFIG_CRYPTO_CRC32_S390=y
730CONFIG_PKCS7_MESSAGE_PARSER=y
731CONFIG_SYSTEM_TRUSTED_KEYRING=y
732CONFIG_CRC7=m
733CONFIG_CRC8=m
734CONFIG_RANDOM32_SELFTEST=y
735CONFIG_CORDIC=m
736CONFIG_CMM=m
737CONFIG_APPLDATA_BASE=y
738CONFIG_KVM=m
739CONFIG_KVM_S390_UCONTROL=y
740CONFIG_VHOST_NET=m
741CONFIG_VHOST_VSOCK=m
diff --git a/arch/s390/configs/defconfig b/arch/s390/configs/defconfig
index e4bc40073003..68d3ca83302b 100644
--- a/arch/s390/configs/defconfig
+++ b/arch/s390/configs/defconfig
@@ -12,30 +12,51 @@ CONFIG_TASK_IO_ACCOUNTING=y
12CONFIG_IKCONFIG=y 12CONFIG_IKCONFIG=y
13CONFIG_IKCONFIG_PROC=y 13CONFIG_IKCONFIG_PROC=y
14CONFIG_NUMA_BALANCING=y 14CONFIG_NUMA_BALANCING=y
15# CONFIG_NUMA_BALANCING_DEFAULT_ENABLED is not set
16CONFIG_MEMCG=y 15CONFIG_MEMCG=y
17CONFIG_MEMCG_SWAP=y 16CONFIG_MEMCG_SWAP=y
18CONFIG_BLK_CGROUP=y 17CONFIG_BLK_CGROUP=y
19CONFIG_CFS_BANDWIDTH=y 18CONFIG_CFS_BANDWIDTH=y
20CONFIG_RT_GROUP_SCHED=y 19CONFIG_RT_GROUP_SCHED=y
21CONFIG_CGROUP_PIDS=y 20CONFIG_CGROUP_PIDS=y
21CONFIG_CGROUP_RDMA=y
22CONFIG_CGROUP_FREEZER=y 22CONFIG_CGROUP_FREEZER=y
23CONFIG_CGROUP_HUGETLB=y 23CONFIG_CGROUP_HUGETLB=y
24CONFIG_CPUSETS=y 24CONFIG_CPUSETS=y
25CONFIG_CGROUP_DEVICE=y 25CONFIG_CGROUP_DEVICE=y
26CONFIG_CGROUP_CPUACCT=y 26CONFIG_CGROUP_CPUACCT=y
27CONFIG_CGROUP_PERF=y 27CONFIG_CGROUP_PERF=y
28CONFIG_CGROUP_BPF=y
28CONFIG_NAMESPACES=y 29CONFIG_NAMESPACES=y
29CONFIG_USER_NS=y 30CONFIG_USER_NS=y
31CONFIG_CHECKPOINT_RESTORE=y
30CONFIG_SCHED_AUTOGROUP=y 32CONFIG_SCHED_AUTOGROUP=y
31CONFIG_BLK_DEV_INITRD=y 33CONFIG_BLK_DEV_INITRD=y
32CONFIG_EXPERT=y 34CONFIG_EXPERT=y
33# CONFIG_SYSFS_SYSCALL is not set 35# CONFIG_SYSFS_SYSCALL is not set
34CONFIG_CHECKPOINT_RESTORE=y
35CONFIG_BPF_SYSCALL=y 36CONFIG_BPF_SYSCALL=y
36CONFIG_USERFAULTFD=y 37CONFIG_USERFAULTFD=y
37# CONFIG_COMPAT_BRK is not set 38# CONFIG_COMPAT_BRK is not set
38CONFIG_PROFILING=y 39CONFIG_PROFILING=y
40CONFIG_LIVEPATCH=y
41CONFIG_TUNE_ZEC12=y
42CONFIG_NR_CPUS=512
43CONFIG_NUMA=y
44# CONFIG_NUMA_EMU is not set
45CONFIG_HZ_100=y
46CONFIG_KEXEC_FILE=y
47CONFIG_EXPOLINE=y
48CONFIG_EXPOLINE_AUTO=y
49CONFIG_CHSC_SCH=y
50CONFIG_VFIO_CCW=m
51CONFIG_VFIO_AP=m
52CONFIG_CRASH_DUMP=y
53CONFIG_HIBERNATION=y
54CONFIG_PM_DEBUG=y
55CONFIG_CMM=m
56CONFIG_APPLDATA_BASE=y
57CONFIG_KVM=m
58CONFIG_VHOST_NET=m
59CONFIG_VHOST_VSOCK=m
39CONFIG_OPROFILE=m 60CONFIG_OPROFILE=m
40CONFIG_KPROBES=y 61CONFIG_KPROBES=y
41CONFIG_JUMP_LABEL=y 62CONFIG_JUMP_LABEL=y
@@ -47,27 +68,18 @@ CONFIG_MODVERSIONS=y
47CONFIG_MODULE_SRCVERSION_ALL=y 68CONFIG_MODULE_SRCVERSION_ALL=y
48CONFIG_MODULE_SIG=y 69CONFIG_MODULE_SIG=y
49CONFIG_MODULE_SIG_SHA256=y 70CONFIG_MODULE_SIG_SHA256=y
50CONFIG_BLK_DEV_INTEGRITY=y
51CONFIG_BLK_DEV_THROTTLING=y 71CONFIG_BLK_DEV_THROTTLING=y
52CONFIG_BLK_WBT=y 72CONFIG_BLK_WBT=y
53CONFIG_BLK_WBT_SQ=y 73CONFIG_BLK_CGROUP_IOLATENCY=y
54CONFIG_PARTITION_ADVANCED=y 74CONFIG_PARTITION_ADVANCED=y
55CONFIG_IBM_PARTITION=y 75CONFIG_IBM_PARTITION=y
56CONFIG_BSD_DISKLABEL=y 76CONFIG_BSD_DISKLABEL=y
57CONFIG_MINIX_SUBPARTITION=y 77CONFIG_MINIX_SUBPARTITION=y
58CONFIG_SOLARIS_X86_PARTITION=y 78CONFIG_SOLARIS_X86_PARTITION=y
59CONFIG_UNIXWARE_DISKLABEL=y 79CONFIG_UNIXWARE_DISKLABEL=y
60CONFIG_CFQ_GROUP_IOSCHED=y 80CONFIG_IOSCHED_BFQ=y
61CONFIG_DEFAULT_DEADLINE=y 81CONFIG_BFQ_GROUP_IOSCHED=y
62CONFIG_LIVEPATCH=y 82CONFIG_BINFMT_MISC=m
63CONFIG_TUNE_ZEC12=y
64CONFIG_NR_CPUS=512
65CONFIG_NUMA=y
66CONFIG_HZ_100=y
67CONFIG_KEXEC_FILE=y
68CONFIG_KEXEC_VERIFY_SIG=y
69CONFIG_EXPOLINE=y
70CONFIG_EXPOLINE_AUTO=y
71CONFIG_MEMORY_HOTPLUG=y 83CONFIG_MEMORY_HOTPLUG=y
72CONFIG_MEMORY_HOTREMOVE=y 84CONFIG_MEMORY_HOTREMOVE=y
73CONFIG_KSM=y 85CONFIG_KSM=y
@@ -81,16 +93,8 @@ CONFIG_ZSMALLOC=m
81CONFIG_ZSMALLOC_STAT=y 93CONFIG_ZSMALLOC_STAT=y
82CONFIG_DEFERRED_STRUCT_PAGE_INIT=y 94CONFIG_DEFERRED_STRUCT_PAGE_INIT=y
83CONFIG_IDLE_PAGE_TRACKING=y 95CONFIG_IDLE_PAGE_TRACKING=y
84CONFIG_PCI=y 96CONFIG_PERCPU_STATS=y
85CONFIG_HOTPLUG_PCI=y 97CONFIG_GUP_BENCHMARK=y
86CONFIG_HOTPLUG_PCI_S390=y
87CONFIG_CHSC_SCH=y
88CONFIG_VFIO_AP=m
89CONFIG_VFIO_CCW=m
90CONFIG_CRASH_DUMP=y
91CONFIG_BINFMT_MISC=m
92CONFIG_HIBERNATION=y
93CONFIG_PM_DEBUG=y
94CONFIG_NET=y 98CONFIG_NET=y
95CONFIG_PACKET=y 99CONFIG_PACKET=y
96CONFIG_PACKET_DIAG=m 100CONFIG_PACKET_DIAG=m
@@ -119,9 +123,6 @@ CONFIG_NET_IPVTI=m
119CONFIG_INET_AH=m 123CONFIG_INET_AH=m
120CONFIG_INET_ESP=m 124CONFIG_INET_ESP=m
121CONFIG_INET_IPCOMP=m 125CONFIG_INET_IPCOMP=m
122CONFIG_INET_XFRM_MODE_TRANSPORT=m
123CONFIG_INET_XFRM_MODE_TUNNEL=m
124CONFIG_INET_XFRM_MODE_BEET=m
125CONFIG_INET_DIAG=m 126CONFIG_INET_DIAG=m
126CONFIG_INET_UDP_DIAG=m 127CONFIG_INET_UDP_DIAG=m
127CONFIG_TCP_CONG_ADVANCED=y 128CONFIG_TCP_CONG_ADVANCED=y
@@ -137,10 +138,6 @@ CONFIG_INET6_AH=m
137CONFIG_INET6_ESP=m 138CONFIG_INET6_ESP=m
138CONFIG_INET6_IPCOMP=m 139CONFIG_INET6_IPCOMP=m
139CONFIG_IPV6_MIP6=m 140CONFIG_IPV6_MIP6=m
140CONFIG_INET6_XFRM_MODE_TRANSPORT=m
141CONFIG_INET6_XFRM_MODE_TUNNEL=m
142CONFIG_INET6_XFRM_MODE_BEET=m
143CONFIG_INET6_XFRM_MODE_ROUTEOPTIMIZATION=m
144CONFIG_IPV6_VTI=m 141CONFIG_IPV6_VTI=m
145CONFIG_IPV6_SIT=m 142CONFIG_IPV6_SIT=m
146CONFIG_IPV6_GRE=m 143CONFIG_IPV6_GRE=m
@@ -262,11 +259,8 @@ CONFIG_IP_VS_SED=m
262CONFIG_IP_VS_NQ=m 259CONFIG_IP_VS_NQ=m
263CONFIG_IP_VS_FTP=m 260CONFIG_IP_VS_FTP=m
264CONFIG_IP_VS_PE_SIP=m 261CONFIG_IP_VS_PE_SIP=m
265CONFIG_NF_CONNTRACK_IPV4=m
266CONFIG_NF_TABLES_IPV4=y 262CONFIG_NF_TABLES_IPV4=y
267CONFIG_NFT_CHAIN_ROUTE_IPV4=m
268CONFIG_NF_TABLES_ARP=y 263CONFIG_NF_TABLES_ARP=y
269CONFIG_NFT_CHAIN_NAT_IPV4=m
270CONFIG_IP_NF_IPTABLES=m 264CONFIG_IP_NF_IPTABLES=m
271CONFIG_IP_NF_MATCH_AH=m 265CONFIG_IP_NF_MATCH_AH=m
272CONFIG_IP_NF_MATCH_ECN=m 266CONFIG_IP_NF_MATCH_ECN=m
@@ -285,10 +279,7 @@ CONFIG_IP_NF_SECURITY=m
285CONFIG_IP_NF_ARPTABLES=m 279CONFIG_IP_NF_ARPTABLES=m
286CONFIG_IP_NF_ARPFILTER=m 280CONFIG_IP_NF_ARPFILTER=m
287CONFIG_IP_NF_ARP_MANGLE=m 281CONFIG_IP_NF_ARP_MANGLE=m
288CONFIG_NF_CONNTRACK_IPV6=m
289CONFIG_NF_TABLES_IPV6=y 282CONFIG_NF_TABLES_IPV6=y
290CONFIG_NFT_CHAIN_ROUTE_IPV6=m
291CONFIG_NFT_CHAIN_NAT_IPV6=m
292CONFIG_IP6_NF_IPTABLES=m 283CONFIG_IP6_NF_IPTABLES=m
293CONFIG_IP6_NF_MATCH_AH=m 284CONFIG_IP6_NF_MATCH_AH=m
294CONFIG_IP6_NF_MATCH_EUI64=m 285CONFIG_IP6_NF_MATCH_EUI64=m
@@ -307,7 +298,7 @@ CONFIG_IP6_NF_RAW=m
307CONFIG_IP6_NF_SECURITY=m 298CONFIG_IP6_NF_SECURITY=m
308CONFIG_IP6_NF_NAT=m 299CONFIG_IP6_NF_NAT=m
309CONFIG_IP6_NF_TARGET_MASQUERADE=m 300CONFIG_IP6_NF_TARGET_MASQUERADE=m
310CONFIG_NF_TABLES_BRIDGE=y 301CONFIG_NF_TABLES_BRIDGE=m
311CONFIG_RDS=m 302CONFIG_RDS=m
312CONFIG_RDS_RDMA=m 303CONFIG_RDS_RDMA=m
313CONFIG_RDS_TCP=m 304CONFIG_RDS_TCP=m
@@ -372,9 +363,11 @@ CONFIG_NETLINK_DIAG=m
372CONFIG_CGROUP_NET_PRIO=y 363CONFIG_CGROUP_NET_PRIO=y
373CONFIG_BPF_JIT=y 364CONFIG_BPF_JIT=y
374CONFIG_NET_PKTGEN=m 365CONFIG_NET_PKTGEN=m
366CONFIG_PCI=y
367CONFIG_HOTPLUG_PCI=y
368CONFIG_HOTPLUG_PCI_S390=y
369CONFIG_UEVENT_HELPER=y
375CONFIG_DEVTMPFS=y 370CONFIG_DEVTMPFS=y
376CONFIG_DMA_CMA=y
377CONFIG_CMA_SIZE_MBYTES=0
378CONFIG_CONNECTOR=y 371CONFIG_CONNECTOR=y
379CONFIG_ZRAM=m 372CONFIG_ZRAM=m
380CONFIG_BLK_DEV_LOOP=m 373CONFIG_BLK_DEV_LOOP=m
@@ -383,6 +376,7 @@ CONFIG_BLK_DEV_DRBD=m
383CONFIG_BLK_DEV_NBD=m 376CONFIG_BLK_DEV_NBD=m
384CONFIG_BLK_DEV_RAM=y 377CONFIG_BLK_DEV_RAM=y
385CONFIG_BLK_DEV_RAM_SIZE=32768 378CONFIG_BLK_DEV_RAM_SIZE=32768
379# CONFIG_BLK_DEV_XPRAM is not set
386CONFIG_VIRTIO_BLK=y 380CONFIG_VIRTIO_BLK=y
387CONFIG_BLK_DEV_RBD=m 381CONFIG_BLK_DEV_RBD=m
388CONFIG_BLK_DEV_NVME=m 382CONFIG_BLK_DEV_NVME=m
@@ -392,7 +386,6 @@ CONFIG_RAID_ATTRS=m
392CONFIG_SCSI=y 386CONFIG_SCSI=y
393CONFIG_BLK_DEV_SD=y 387CONFIG_BLK_DEV_SD=y
394CONFIG_CHR_DEV_ST=m 388CONFIG_CHR_DEV_ST=m
395CONFIG_CHR_DEV_OSST=m
396CONFIG_BLK_DEV_SR=m 389CONFIG_BLK_DEV_SR=m
397CONFIG_CHR_DEV_SG=y 390CONFIG_CHR_DEV_SG=y
398CONFIG_CHR_DEV_SCH=m 391CONFIG_CHR_DEV_SCH=m
@@ -412,17 +405,19 @@ CONFIG_SCSI_DH_RDAC=m
412CONFIG_SCSI_DH_HP_SW=m 405CONFIG_SCSI_DH_HP_SW=m
413CONFIG_SCSI_DH_EMC=m 406CONFIG_SCSI_DH_EMC=m
414CONFIG_SCSI_DH_ALUA=m 407CONFIG_SCSI_DH_ALUA=m
415CONFIG_SCSI_OSD_INITIATOR=m
416CONFIG_SCSI_OSD_ULD=m
417CONFIG_MD=y 408CONFIG_MD=y
418CONFIG_BLK_DEV_MD=y 409CONFIG_BLK_DEV_MD=y
419CONFIG_MD_LINEAR=m 410CONFIG_MD_LINEAR=m
420CONFIG_MD_MULTIPATH=m 411CONFIG_MD_MULTIPATH=m
421CONFIG_MD_FAULTY=m 412CONFIG_MD_FAULTY=m
413CONFIG_MD_CLUSTER=m
414CONFIG_BCACHE=m
422CONFIG_BLK_DEV_DM=m 415CONFIG_BLK_DEV_DM=m
416CONFIG_DM_UNSTRIPED=m
423CONFIG_DM_CRYPT=m 417CONFIG_DM_CRYPT=m
424CONFIG_DM_SNAPSHOT=m 418CONFIG_DM_SNAPSHOT=m
425CONFIG_DM_THIN_PROVISIONING=m 419CONFIG_DM_THIN_PROVISIONING=m
420CONFIG_DM_WRITECACHE=m
426CONFIG_DM_MIRROR=m 421CONFIG_DM_MIRROR=m
427CONFIG_DM_LOG_USERSPACE=m 422CONFIG_DM_LOG_USERSPACE=m
428CONFIG_DM_RAID=m 423CONFIG_DM_RAID=m
@@ -435,6 +430,7 @@ CONFIG_DM_UEVENT=y
435CONFIG_DM_FLAKEY=m 430CONFIG_DM_FLAKEY=m
436CONFIG_DM_VERITY=m 431CONFIG_DM_VERITY=m
437CONFIG_DM_SWITCH=m 432CONFIG_DM_SWITCH=m
433CONFIG_DM_INTEGRITY=m
438CONFIG_NETDEVICES=y 434CONFIG_NETDEVICES=y
439CONFIG_BONDING=m 435CONFIG_BONDING=m
440CONFIG_DUMMY=m 436CONFIG_DUMMY=m
@@ -442,23 +438,78 @@ CONFIG_EQUALIZER=m
442CONFIG_IFB=m 438CONFIG_IFB=m
443CONFIG_MACVLAN=m 439CONFIG_MACVLAN=m
444CONFIG_MACVTAP=m 440CONFIG_MACVTAP=m
445CONFIG_VXLAN=m
446CONFIG_TUN=m 441CONFIG_TUN=m
447CONFIG_VETH=m 442CONFIG_VETH=m
448CONFIG_VIRTIO_NET=m 443CONFIG_VIRTIO_NET=m
449CONFIG_NLMON=m 444CONFIG_NLMON=m
445# CONFIG_NET_VENDOR_3COM is not set
446# CONFIG_NET_VENDOR_ADAPTEC is not set
447# CONFIG_NET_VENDOR_AGERE is not set
448# CONFIG_NET_VENDOR_ALACRITECH is not set
449# CONFIG_NET_VENDOR_ALTEON is not set
450# CONFIG_NET_VENDOR_AMAZON is not set
451# CONFIG_NET_VENDOR_AMD is not set
452# CONFIG_NET_VENDOR_AQUANTIA is not set
450# CONFIG_NET_VENDOR_ARC is not set 453# CONFIG_NET_VENDOR_ARC is not set
454# CONFIG_NET_VENDOR_ATHEROS is not set
455# CONFIG_NET_VENDOR_AURORA is not set
456# CONFIG_NET_VENDOR_BROADCOM is not set
457# CONFIG_NET_VENDOR_BROCADE is not set
458# CONFIG_NET_VENDOR_CADENCE is not set
459# CONFIG_NET_VENDOR_CAVIUM is not set
451# CONFIG_NET_VENDOR_CHELSIO is not set 460# CONFIG_NET_VENDOR_CHELSIO is not set
461# CONFIG_NET_VENDOR_CISCO is not set
462# CONFIG_NET_VENDOR_CORTINA is not set
463# CONFIG_NET_VENDOR_DEC is not set
464# CONFIG_NET_VENDOR_DLINK is not set
465# CONFIG_NET_VENDOR_EMULEX is not set
466# CONFIG_NET_VENDOR_EZCHIP is not set
467# CONFIG_NET_VENDOR_GOOGLE is not set
468# CONFIG_NET_VENDOR_HP is not set
469# CONFIG_NET_VENDOR_HUAWEI is not set
452# CONFIG_NET_VENDOR_INTEL is not set 470# CONFIG_NET_VENDOR_INTEL is not set
453# CONFIG_NET_VENDOR_MARVELL is not set 471# CONFIG_NET_VENDOR_MARVELL is not set
454CONFIG_MLX4_EN=m 472CONFIG_MLX4_EN=m
455CONFIG_MLX5_CORE=m 473CONFIG_MLX5_CORE=m
456CONFIG_MLX5_CORE_EN=y 474CONFIG_MLX5_CORE_EN=y
475# CONFIG_MLXFW is not set
476# CONFIG_NET_VENDOR_MICREL is not set
477# CONFIG_NET_VENDOR_MICROCHIP is not set
478# CONFIG_NET_VENDOR_MICROSEMI is not set
479# CONFIG_NET_VENDOR_MYRI is not set
457# CONFIG_NET_VENDOR_NATSEMI is not set 480# CONFIG_NET_VENDOR_NATSEMI is not set
481# CONFIG_NET_VENDOR_NETERION is not set
482# CONFIG_NET_VENDOR_NETRONOME is not set
483# CONFIG_NET_VENDOR_NI is not set
484# CONFIG_NET_VENDOR_NVIDIA is not set
485# CONFIG_NET_VENDOR_OKI is not set
486# CONFIG_NET_VENDOR_PACKET_ENGINES is not set
487# CONFIG_NET_VENDOR_QLOGIC is not set
488# CONFIG_NET_VENDOR_QUALCOMM is not set
489# CONFIG_NET_VENDOR_RDC is not set
490# CONFIG_NET_VENDOR_REALTEK is not set
491# CONFIG_NET_VENDOR_RENESAS is not set
492# CONFIG_NET_VENDOR_ROCKER is not set
493# CONFIG_NET_VENDOR_SAMSUNG is not set
494# CONFIG_NET_VENDOR_SEEQ is not set
495# CONFIG_NET_VENDOR_SOLARFLARE is not set
496# CONFIG_NET_VENDOR_SILAN is not set
497# CONFIG_NET_VENDOR_SIS is not set
498# CONFIG_NET_VENDOR_SMSC is not set
499# CONFIG_NET_VENDOR_SOCIONEXT is not set
500# CONFIG_NET_VENDOR_STMICRO is not set
501# CONFIG_NET_VENDOR_SUN is not set
502# CONFIG_NET_VENDOR_SYNOPSYS is not set
503# CONFIG_NET_VENDOR_TEHUTI is not set
504# CONFIG_NET_VENDOR_TI is not set
505# CONFIG_NET_VENDOR_VIA is not set
506# CONFIG_NET_VENDOR_WIZNET is not set
458CONFIG_PPP=m 507CONFIG_PPP=m
459CONFIG_PPP_BSDCOMP=m 508CONFIG_PPP_BSDCOMP=m
460CONFIG_PPP_DEFLATE=m 509CONFIG_PPP_DEFLATE=m
510CONFIG_PPP_FILTER=y
461CONFIG_PPP_MPPE=m 511CONFIG_PPP_MPPE=m
512CONFIG_PPP_MULTILINK=y
462CONFIG_PPPOE=m 513CONFIG_PPPOE=m
463CONFIG_PPTP=m 514CONFIG_PPTP=m
464CONFIG_PPPOL2TP=m 515CONFIG_PPPOL2TP=m
@@ -470,17 +521,21 @@ CONFIG_INPUT_EVDEV=y
470# CONFIG_INPUT_MOUSE is not set 521# CONFIG_INPUT_MOUSE is not set
471# CONFIG_SERIO is not set 522# CONFIG_SERIO is not set
472CONFIG_LEGACY_PTY_COUNT=0 523CONFIG_LEGACY_PTY_COUNT=0
524CONFIG_NULL_TTY=m
473CONFIG_HW_RANDOM_VIRTIO=m 525CONFIG_HW_RANDOM_VIRTIO=m
474CONFIG_RAW_DRIVER=m 526CONFIG_RAW_DRIVER=m
475CONFIG_HANGCHECK_TIMER=m 527CONFIG_HANGCHECK_TIMER=m
476CONFIG_TN3270_FS=y 528CONFIG_TN3270_FS=y
529# CONFIG_PTP_1588_CLOCK is not set
477# CONFIG_HWMON is not set 530# CONFIG_HWMON is not set
478CONFIG_WATCHDOG=y 531CONFIG_WATCHDOG=y
532CONFIG_WATCHDOG_CORE=y
479CONFIG_WATCHDOG_NOWAYOUT=y 533CONFIG_WATCHDOG_NOWAYOUT=y
480CONFIG_SOFT_WATCHDOG=m 534CONFIG_SOFT_WATCHDOG=m
481CONFIG_DIAG288_WATCHDOG=m 535CONFIG_DIAG288_WATCHDOG=m
482CONFIG_DRM=y 536CONFIG_DRM=y
483CONFIG_DRM_VIRTIO_GPU=y 537CONFIG_DRM_VIRTIO_GPU=y
538# CONFIG_BACKLIGHT_CLASS_DEVICE is not set
484CONFIG_FRAMEBUFFER_CONSOLE=y 539CONFIG_FRAMEBUFFER_CONSOLE=y
485# CONFIG_HID is not set 540# CONFIG_HID is not set
486# CONFIG_USB_SUPPORT is not set 541# CONFIG_USB_SUPPORT is not set
@@ -495,8 +550,8 @@ CONFIG_VFIO_MDEV_DEVICE=m
495CONFIG_VIRTIO_PCI=m 550CONFIG_VIRTIO_PCI=m
496CONFIG_VIRTIO_BALLOON=m 551CONFIG_VIRTIO_BALLOON=m
497CONFIG_VIRTIO_INPUT=y 552CONFIG_VIRTIO_INPUT=y
498CONFIG_S390_AP_IOMMU=y
499CONFIG_S390_CCW_IOMMU=y 553CONFIG_S390_CCW_IOMMU=y
554CONFIG_S390_AP_IOMMU=y
500CONFIG_EXT4_FS=y 555CONFIG_EXT4_FS=y
501CONFIG_EXT4_FS_POSIX_ACL=y 556CONFIG_EXT4_FS_POSIX_ACL=y
502CONFIG_EXT4_FS_SECURITY=y 557CONFIG_EXT4_FS_SECURITY=y
@@ -546,8 +601,10 @@ CONFIG_ECRYPT_FS=m
546CONFIG_CRAMFS=m 601CONFIG_CRAMFS=m
547CONFIG_SQUASHFS=m 602CONFIG_SQUASHFS=m
548CONFIG_SQUASHFS_XATTR=y 603CONFIG_SQUASHFS_XATTR=y
604CONFIG_SQUASHFS_LZ4=y
549CONFIG_SQUASHFS_LZO=y 605CONFIG_SQUASHFS_LZO=y
550CONFIG_SQUASHFS_XZ=y 606CONFIG_SQUASHFS_XZ=y
607CONFIG_SQUASHFS_ZSTD=y
551CONFIG_ROMFS_FS=m 608CONFIG_ROMFS_FS=m
552CONFIG_NFS_FS=m 609CONFIG_NFS_FS=m
553CONFIG_NFS_V3_ACL=y 610CONFIG_NFS_V3_ACL=y
@@ -558,7 +615,6 @@ CONFIG_NFSD_V3_ACL=y
558CONFIG_NFSD_V4=y 615CONFIG_NFSD_V4=y
559CONFIG_NFSD_V4_SECURITY_LABEL=y 616CONFIG_NFSD_V4_SECURITY_LABEL=y
560CONFIG_CIFS=m 617CONFIG_CIFS=m
561CONFIG_CIFS_STATS=y
562CONFIG_CIFS_STATS2=y 618CONFIG_CIFS_STATS2=y
563CONFIG_CIFS_WEAK_PW_HASH=y 619CONFIG_CIFS_WEAK_PW_HASH=y
564CONFIG_CIFS_UPCALL=y 620CONFIG_CIFS_UPCALL=y
@@ -574,31 +630,7 @@ CONFIG_NLS_ISO8859_1=m
574CONFIG_NLS_ISO8859_15=m 630CONFIG_NLS_ISO8859_15=m
575CONFIG_NLS_UTF8=m 631CONFIG_NLS_UTF8=m
576CONFIG_DLM=m 632CONFIG_DLM=m
577CONFIG_PRINTK_TIME=y 633CONFIG_UNICODE=y
578CONFIG_DEBUG_INFO=y
579CONFIG_DEBUG_INFO_DWARF4=y
580CONFIG_GDB_SCRIPTS=y
581# CONFIG_ENABLE_MUST_CHECK is not set
582CONFIG_FRAME_WARN=1024
583CONFIG_UNUSED_SYMBOLS=y
584CONFIG_MAGIC_SYSRQ=y
585CONFIG_DEBUG_MEMORY_INIT=y
586CONFIG_PANIC_ON_OOPS=y
587CONFIG_RCU_TORTURE_TEST=m
588CONFIG_RCU_CPU_STALL_TIMEOUT=60
589CONFIG_LATENCYTOP=y
590CONFIG_SCHED_TRACER=y
591CONFIG_FTRACE_SYSCALLS=y
592CONFIG_STACK_TRACER=y
593CONFIG_BLK_DEV_IO_TRACE=y
594CONFIG_FUNCTION_PROFILER=y
595CONFIG_HIST_TRIGGERS=y
596CONFIG_LKDTM=m
597CONFIG_PERCPU_TEST=m
598CONFIG_ATOMIC64_SELFTEST=y
599CONFIG_TEST_BPF=m
600CONFIG_BUG_ON_DATA_CORRUPTION=y
601CONFIG_S390_PTDUMP=y
602CONFIG_PERSISTENT_KEYRINGS=y 634CONFIG_PERSISTENT_KEYRINGS=y
603CONFIG_BIG_KEYS=y 635CONFIG_BIG_KEYS=y
604CONFIG_ENCRYPTED_KEYS=m 636CONFIG_ENCRYPTED_KEYS=m
@@ -606,7 +638,6 @@ CONFIG_SECURITY=y
606CONFIG_SECURITY_NETWORK=y 638CONFIG_SECURITY_NETWORK=y
607CONFIG_SECURITY_SELINUX=y 639CONFIG_SECURITY_SELINUX=y
608CONFIG_SECURITY_SELINUX_BOOTPARAM=y 640CONFIG_SECURITY_SELINUX_BOOTPARAM=y
609CONFIG_SECURITY_SELINUX_BOOTPARAM_VALUE=0
610CONFIG_SECURITY_SELINUX_DISABLE=y 641CONFIG_SECURITY_SELINUX_DISABLE=y
611CONFIG_INTEGRITY_SIGNATURE=y 642CONFIG_INTEGRITY_SIGNATURE=y
612CONFIG_INTEGRITY_ASYMMETRIC_KEYS=y 643CONFIG_INTEGRITY_ASYMMETRIC_KEYS=y
@@ -615,31 +646,42 @@ CONFIG_IMA_DEFAULT_HASH_SHA256=y
615CONFIG_IMA_WRITE_POLICY=y 646CONFIG_IMA_WRITE_POLICY=y
616CONFIG_IMA_APPRAISE=y 647CONFIG_IMA_APPRAISE=y
617CONFIG_CRYPTO_FIPS=y 648CONFIG_CRYPTO_FIPS=y
618CONFIG_CRYPTO_DH=m
619CONFIG_CRYPTO_ECDH=m
620CONFIG_CRYPTO_USER=m 649CONFIG_CRYPTO_USER=m
621# CONFIG_CRYPTO_MANAGER_DISABLE_TESTS is not set 650# CONFIG_CRYPTO_MANAGER_DISABLE_TESTS is not set
622CONFIG_CRYPTO_PCRYPT=m 651CONFIG_CRYPTO_PCRYPT=m
623CONFIG_CRYPTO_CRYPTD=m 652CONFIG_CRYPTO_CRYPTD=m
624CONFIG_CRYPTO_TEST=m 653CONFIG_CRYPTO_TEST=m
654CONFIG_CRYPTO_DH=m
655CONFIG_CRYPTO_ECDH=m
656CONFIG_CRYPTO_ECRDSA=m
625CONFIG_CRYPTO_CHACHA20POLY1305=m 657CONFIG_CRYPTO_CHACHA20POLY1305=m
658CONFIG_CRYPTO_AEGIS128=m
659CONFIG_CRYPTO_AEGIS128L=m
660CONFIG_CRYPTO_AEGIS256=m
661CONFIG_CRYPTO_MORUS640=m
662CONFIG_CRYPTO_MORUS1280=m
663CONFIG_CRYPTO_CFB=m
626CONFIG_CRYPTO_LRW=m 664CONFIG_CRYPTO_LRW=m
665CONFIG_CRYPTO_OFB=m
627CONFIG_CRYPTO_PCBC=m 666CONFIG_CRYPTO_PCBC=m
628CONFIG_CRYPTO_KEYWRAP=m 667CONFIG_CRYPTO_KEYWRAP=m
668CONFIG_CRYPTO_ADIANTUM=m
629CONFIG_CRYPTO_XCBC=m 669CONFIG_CRYPTO_XCBC=m
630CONFIG_CRYPTO_VMAC=m 670CONFIG_CRYPTO_VMAC=m
631CONFIG_CRYPTO_CRC32=m 671CONFIG_CRYPTO_CRC32=m
672CONFIG_CRYPTO_XXHASH=m
632CONFIG_CRYPTO_MICHAEL_MIC=m 673CONFIG_CRYPTO_MICHAEL_MIC=m
633CONFIG_CRYPTO_RMD128=m 674CONFIG_CRYPTO_RMD128=m
634CONFIG_CRYPTO_RMD160=m 675CONFIG_CRYPTO_RMD160=m
635CONFIG_CRYPTO_RMD256=m 676CONFIG_CRYPTO_RMD256=m
636CONFIG_CRYPTO_RMD320=m 677CONFIG_CRYPTO_RMD320=m
637CONFIG_CRYPTO_SHA512=m
638CONFIG_CRYPTO_SHA3=m 678CONFIG_CRYPTO_SHA3=m
679CONFIG_CRYPTO_SM3=m
639CONFIG_CRYPTO_TGR192=m 680CONFIG_CRYPTO_TGR192=m
640CONFIG_CRYPTO_WP512=m 681CONFIG_CRYPTO_WP512=m
641CONFIG_CRYPTO_AES_TI=m 682CONFIG_CRYPTO_AES_TI=m
642CONFIG_CRYPTO_ANUBIS=m 683CONFIG_CRYPTO_ANUBIS=m
684CONFIG_CRYPTO_ARC4=m
643CONFIG_CRYPTO_BLOWFISH=m 685CONFIG_CRYPTO_BLOWFISH=m
644CONFIG_CRYPTO_CAMELLIA=m 686CONFIG_CRYPTO_CAMELLIA=m
645CONFIG_CRYPTO_CAST5=m 687CONFIG_CRYPTO_CAST5=m
@@ -649,16 +691,19 @@ CONFIG_CRYPTO_KHAZAD=m
649CONFIG_CRYPTO_SALSA20=m 691CONFIG_CRYPTO_SALSA20=m
650CONFIG_CRYPTO_SEED=m 692CONFIG_CRYPTO_SEED=m
651CONFIG_CRYPTO_SERPENT=m 693CONFIG_CRYPTO_SERPENT=m
694CONFIG_CRYPTO_SM4=m
652CONFIG_CRYPTO_TEA=m 695CONFIG_CRYPTO_TEA=m
653CONFIG_CRYPTO_TWOFISH=m 696CONFIG_CRYPTO_TWOFISH=m
654CONFIG_CRYPTO_842=m 697CONFIG_CRYPTO_842=m
655CONFIG_CRYPTO_LZ4=m 698CONFIG_CRYPTO_LZ4=m
656CONFIG_CRYPTO_LZ4HC=m 699CONFIG_CRYPTO_LZ4HC=m
700CONFIG_CRYPTO_ZSTD=m
657CONFIG_CRYPTO_ANSI_CPRNG=m 701CONFIG_CRYPTO_ANSI_CPRNG=m
658CONFIG_CRYPTO_USER_API_HASH=m 702CONFIG_CRYPTO_USER_API_HASH=m
659CONFIG_CRYPTO_USER_API_SKCIPHER=m 703CONFIG_CRYPTO_USER_API_SKCIPHER=m
660CONFIG_CRYPTO_USER_API_RNG=m 704CONFIG_CRYPTO_USER_API_RNG=m
661CONFIG_CRYPTO_USER_API_AEAD=m 705CONFIG_CRYPTO_USER_API_AEAD=m
706CONFIG_CRYPTO_STATS=y
662CONFIG_ZCRYPT=m 707CONFIG_ZCRYPT=m
663CONFIG_PKEY=m 708CONFIG_PKEY=m
664CONFIG_CRYPTO_PAES_S390=m 709CONFIG_CRYPTO_PAES_S390=m
@@ -669,12 +714,34 @@ CONFIG_CRYPTO_DES_S390=m
669CONFIG_CRYPTO_AES_S390=m 714CONFIG_CRYPTO_AES_S390=m
670CONFIG_CRYPTO_GHASH_S390=m 715CONFIG_CRYPTO_GHASH_S390=m
671CONFIG_CRYPTO_CRC32_S390=y 716CONFIG_CRYPTO_CRC32_S390=y
717CONFIG_CORDIC=m
718CONFIG_CRC4=m
672CONFIG_CRC7=m 719CONFIG_CRC7=m
673CONFIG_CRC8=m 720CONFIG_CRC8=m
674CONFIG_CORDIC=m 721CONFIG_DMA_CMA=y
675CONFIG_CMM=m 722CONFIG_CMA_SIZE_MBYTES=0
676CONFIG_APPLDATA_BASE=y 723CONFIG_PRINTK_TIME=y
677CONFIG_KVM=m 724CONFIG_DEBUG_INFO=y
678CONFIG_KVM_S390_UCONTROL=y 725CONFIG_DEBUG_INFO_DWARF4=y
679CONFIG_VHOST_NET=m 726CONFIG_GDB_SCRIPTS=y
680CONFIG_VHOST_VSOCK=m 727CONFIG_FRAME_WARN=1024
728CONFIG_UNUSED_SYMBOLS=y
729CONFIG_DEBUG_SECTION_MISMATCH=y
730CONFIG_MAGIC_SYSRQ=y
731CONFIG_DEBUG_MEMORY_INIT=y
732CONFIG_PANIC_ON_OOPS=y
733CONFIG_RCU_TORTURE_TEST=m
734CONFIG_RCU_CPU_STALL_TIMEOUT=60
735CONFIG_LATENCYTOP=y
736CONFIG_SCHED_TRACER=y
737CONFIG_FTRACE_SYSCALLS=y
738CONFIG_STACK_TRACER=y
739CONFIG_BLK_DEV_IO_TRACE=y
740CONFIG_FUNCTION_PROFILER=y
741CONFIG_HIST_TRIGGERS=y
742CONFIG_LKDTM=m
743CONFIG_PERCPU_TEST=m
744CONFIG_ATOMIC64_SELFTEST=y
745CONFIG_TEST_BPF=m
746CONFIG_BUG_ON_DATA_CORRUPTION=y
747CONFIG_S390_PTDUMP=y
diff --git a/arch/s390/configs/zfcpdump_defconfig b/arch/s390/configs/zfcpdump_defconfig
index d92bab844b73..be09a208b608 100644
--- a/arch/s390/configs/zfcpdump_defconfig
+++ b/arch/s390/configs/zfcpdump_defconfig
@@ -1,27 +1,33 @@
1# CONFIG_SWAP is not set 1# CONFIG_SWAP is not set
2CONFIG_NO_HZ_IDLE=y 2CONFIG_NO_HZ_IDLE=y
3CONFIG_HIGH_RES_TIMERS=y 3CONFIG_HIGH_RES_TIMERS=y
4# CONFIG_CPU_ISOLATION is not set
5# CONFIG_UTS_NS is not set
6# CONFIG_PID_NS is not set
7# CONFIG_NET_NS is not set
4CONFIG_BLK_DEV_INITRD=y 8CONFIG_BLK_DEV_INITRD=y
5CONFIG_CC_OPTIMIZE_FOR_SIZE=y 9CONFIG_CC_OPTIMIZE_FOR_SIZE=y
6# CONFIG_COMPAT_BRK is not set 10# CONFIG_COMPAT_BRK is not set
7CONFIG_PARTITION_ADVANCED=y
8CONFIG_IBM_PARTITION=y
9CONFIG_DEFAULT_DEADLINE=y
10CONFIG_TUNE_ZEC12=y 11CONFIG_TUNE_ZEC12=y
11# CONFIG_COMPAT is not set 12# CONFIG_COMPAT is not set
12CONFIG_NR_CPUS=2 13CONFIG_NR_CPUS=2
13# CONFIG_HOTPLUG_CPU is not set
14CONFIG_HZ_100=y 14CONFIG_HZ_100=y
15# CONFIG_ARCH_RANDOM is not set 15# CONFIG_ARCH_RANDOM is not set
16# CONFIG_COMPACTION is not set 16# CONFIG_RELOCATABLE is not set
17# CONFIG_MIGRATION is not set
18# CONFIG_BOUNCE is not set
19# CONFIG_CHECK_STACK is not set
20# CONFIG_CHSC_SCH is not set 17# CONFIG_CHSC_SCH is not set
21# CONFIG_SCM_BUS is not set 18# CONFIG_SCM_BUS is not set
22CONFIG_CRASH_DUMP=y 19CONFIG_CRASH_DUMP=y
23# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
24# CONFIG_SECCOMP is not set 20# CONFIG_SECCOMP is not set
21# CONFIG_PFAULT is not set
22# CONFIG_S390_HYPFS_FS is not set
23# CONFIG_VIRTUALIZATION is not set
24# CONFIG_S390_GUEST is not set
25CONFIG_PARTITION_ADVANCED=y
26CONFIG_IBM_PARTITION=y
27# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
28# CONFIG_COMPACTION is not set
29# CONFIG_MIGRATION is not set
30# CONFIG_BOUNCE is not set
25CONFIG_NET=y 31CONFIG_NET=y
26# CONFIG_IUCV is not set 32# CONFIG_IUCV is not set
27CONFIG_DEVTMPFS=y 33CONFIG_DEVTMPFS=y
@@ -43,7 +49,6 @@ CONFIG_ZFCP=y
43# CONFIG_HVC_IUCV is not set 49# CONFIG_HVC_IUCV is not set
44# CONFIG_HW_RANDOM_S390 is not set 50# CONFIG_HW_RANDOM_S390 is not set
45CONFIG_RAW_DRIVER=y 51CONFIG_RAW_DRIVER=y
46# CONFIG_SCLP_ASYNC is not set
47# CONFIG_HMC_DRV is not set 52# CONFIG_HMC_DRV is not set
48# CONFIG_S390_TAPE is not set 53# CONFIG_S390_TAPE is not set
49# CONFIG_VMCP is not set 54# CONFIG_VMCP is not set
@@ -56,6 +61,7 @@ CONFIG_RAW_DRIVER=y
56CONFIG_CONFIGFS_FS=y 61CONFIG_CONFIGFS_FS=y
57# CONFIG_MISC_FILESYSTEMS is not set 62# CONFIG_MISC_FILESYSTEMS is not set
58# CONFIG_NETWORK_FILESYSTEMS is not set 63# CONFIG_NETWORK_FILESYSTEMS is not set
64# CONFIG_DIMLIB is not set
59CONFIG_PRINTK_TIME=y 65CONFIG_PRINTK_TIME=y
60CONFIG_DEBUG_INFO=y 66CONFIG_DEBUG_INFO=y
61CONFIG_DEBUG_FS=y 67CONFIG_DEBUG_FS=y
@@ -64,7 +70,4 @@ CONFIG_PANIC_ON_OOPS=y
64# CONFIG_SCHED_DEBUG is not set 70# CONFIG_SCHED_DEBUG is not set
65CONFIG_RCU_CPU_STALL_TIMEOUT=60 71CONFIG_RCU_CPU_STALL_TIMEOUT=60
66# CONFIG_FTRACE is not set 72# CONFIG_FTRACE is not set
67# CONFIG_PFAULT is not set 73# CONFIG_RUNTIME_TESTING_MENU is not set
68# CONFIG_S390_HYPFS_FS is not set
69# CONFIG_VIRTUALIZATION is not set
70# CONFIG_S390_GUEST is not set
diff --git a/arch/s390/hypfs/hypfs_vm.c b/arch/s390/hypfs/hypfs_vm.c
index 42f2375c203e..e1fcc03159ef 100644
--- a/arch/s390/hypfs/hypfs_vm.c
+++ b/arch/s390/hypfs/hypfs_vm.c
@@ -118,7 +118,7 @@ do { \
118 return PTR_ERR(rc); \ 118 return PTR_ERR(rc); \
119} while(0) 119} while(0)
120 120
121static int hpyfs_vm_create_guest(struct dentry *systems_dir, 121static int hypfs_vm_create_guest(struct dentry *systems_dir,
122 struct diag2fc_data *data) 122 struct diag2fc_data *data)
123{ 123{
124 char guest_name[NAME_LEN + 1] = {}; 124 char guest_name[NAME_LEN + 1] = {};
@@ -219,7 +219,7 @@ int hypfs_vm_create_files(struct dentry *root)
219 } 219 }
220 220
221 for (i = 0; i < count; i++) { 221 for (i = 0; i < count; i++) {
222 rc = hpyfs_vm_create_guest(dir, &(data[i])); 222 rc = hypfs_vm_create_guest(dir, &(data[i]));
223 if (rc) 223 if (rc)
224 goto failed; 224 goto failed;
225 } 225 }
diff --git a/arch/s390/include/asm/bitops.h b/arch/s390/include/asm/bitops.h
index 9900d655014c..b8833ac983fa 100644
--- a/arch/s390/include/asm/bitops.h
+++ b/arch/s390/include/asm/bitops.h
@@ -35,6 +35,7 @@
35 35
36#include <linux/typecheck.h> 36#include <linux/typecheck.h>
37#include <linux/compiler.h> 37#include <linux/compiler.h>
38#include <linux/types.h>
38#include <asm/atomic_ops.h> 39#include <asm/atomic_ops.h>
39#include <asm/barrier.h> 40#include <asm/barrier.h>
40 41
@@ -55,7 +56,7 @@ __bitops_byte(unsigned long nr, volatile unsigned long *ptr)
55 return ((unsigned char *)ptr) + ((nr ^ (BITS_PER_LONG - 8)) >> 3); 56 return ((unsigned char *)ptr) + ((nr ^ (BITS_PER_LONG - 8)) >> 3);
56} 57}
57 58
58static inline void set_bit(unsigned long nr, volatile unsigned long *ptr) 59static inline void arch_set_bit(unsigned long nr, volatile unsigned long *ptr)
59{ 60{
60 unsigned long *addr = __bitops_word(nr, ptr); 61 unsigned long *addr = __bitops_word(nr, ptr);
61 unsigned long mask; 62 unsigned long mask;
@@ -76,7 +77,7 @@ static inline void set_bit(unsigned long nr, volatile unsigned long *ptr)
76 __atomic64_or(mask, (long *)addr); 77 __atomic64_or(mask, (long *)addr);
77} 78}
78 79
79static inline void clear_bit(unsigned long nr, volatile unsigned long *ptr) 80static inline void arch_clear_bit(unsigned long nr, volatile unsigned long *ptr)
80{ 81{
81 unsigned long *addr = __bitops_word(nr, ptr); 82 unsigned long *addr = __bitops_word(nr, ptr);
82 unsigned long mask; 83 unsigned long mask;
@@ -97,7 +98,8 @@ static inline void clear_bit(unsigned long nr, volatile unsigned long *ptr)
97 __atomic64_and(mask, (long *)addr); 98 __atomic64_and(mask, (long *)addr);
98} 99}
99 100
100static inline void change_bit(unsigned long nr, volatile unsigned long *ptr) 101static inline void arch_change_bit(unsigned long nr,
102 volatile unsigned long *ptr)
101{ 103{
102 unsigned long *addr = __bitops_word(nr, ptr); 104 unsigned long *addr = __bitops_word(nr, ptr);
103 unsigned long mask; 105 unsigned long mask;
@@ -118,8 +120,8 @@ static inline void change_bit(unsigned long nr, volatile unsigned long *ptr)
118 __atomic64_xor(mask, (long *)addr); 120 __atomic64_xor(mask, (long *)addr);
119} 121}
120 122
121static inline int 123static inline bool arch_test_and_set_bit(unsigned long nr,
122test_and_set_bit(unsigned long nr, volatile unsigned long *ptr) 124 volatile unsigned long *ptr)
123{ 125{
124 unsigned long *addr = __bitops_word(nr, ptr); 126 unsigned long *addr = __bitops_word(nr, ptr);
125 unsigned long old, mask; 127 unsigned long old, mask;
@@ -129,8 +131,8 @@ test_and_set_bit(unsigned long nr, volatile unsigned long *ptr)
129 return (old & mask) != 0; 131 return (old & mask) != 0;
130} 132}
131 133
132static inline int 134static inline bool arch_test_and_clear_bit(unsigned long nr,
133test_and_clear_bit(unsigned long nr, volatile unsigned long *ptr) 135 volatile unsigned long *ptr)
134{ 136{
135 unsigned long *addr = __bitops_word(nr, ptr); 137 unsigned long *addr = __bitops_word(nr, ptr);
136 unsigned long old, mask; 138 unsigned long old, mask;
@@ -140,8 +142,8 @@ test_and_clear_bit(unsigned long nr, volatile unsigned long *ptr)
140 return (old & ~mask) != 0; 142 return (old & ~mask) != 0;
141} 143}
142 144
143static inline int 145static inline bool arch_test_and_change_bit(unsigned long nr,
144test_and_change_bit(unsigned long nr, volatile unsigned long *ptr) 146 volatile unsigned long *ptr)
145{ 147{
146 unsigned long *addr = __bitops_word(nr, ptr); 148 unsigned long *addr = __bitops_word(nr, ptr);
147 unsigned long old, mask; 149 unsigned long old, mask;
@@ -151,30 +153,31 @@ test_and_change_bit(unsigned long nr, volatile unsigned long *ptr)
151 return (old & mask) != 0; 153 return (old & mask) != 0;
152} 154}
153 155
154static inline void __set_bit(unsigned long nr, volatile unsigned long *ptr) 156static inline void arch___set_bit(unsigned long nr, volatile unsigned long *ptr)
155{ 157{
156 unsigned char *addr = __bitops_byte(nr, ptr); 158 unsigned char *addr = __bitops_byte(nr, ptr);
157 159
158 *addr |= 1 << (nr & 7); 160 *addr |= 1 << (nr & 7);
159} 161}
160 162
161static inline void 163static inline void arch___clear_bit(unsigned long nr,
162__clear_bit(unsigned long nr, volatile unsigned long *ptr) 164 volatile unsigned long *ptr)
163{ 165{
164 unsigned char *addr = __bitops_byte(nr, ptr); 166 unsigned char *addr = __bitops_byte(nr, ptr);
165 167
166 *addr &= ~(1 << (nr & 7)); 168 *addr &= ~(1 << (nr & 7));
167} 169}
168 170
169static inline void __change_bit(unsigned long nr, volatile unsigned long *ptr) 171static inline void arch___change_bit(unsigned long nr,
172 volatile unsigned long *ptr)
170{ 173{
171 unsigned char *addr = __bitops_byte(nr, ptr); 174 unsigned char *addr = __bitops_byte(nr, ptr);
172 175
173 *addr ^= 1 << (nr & 7); 176 *addr ^= 1 << (nr & 7);
174} 177}
175 178
176static inline int 179static inline bool arch___test_and_set_bit(unsigned long nr,
177__test_and_set_bit(unsigned long nr, volatile unsigned long *ptr) 180 volatile unsigned long *ptr)
178{ 181{
179 unsigned char *addr = __bitops_byte(nr, ptr); 182 unsigned char *addr = __bitops_byte(nr, ptr);
180 unsigned char ch; 183 unsigned char ch;
@@ -184,8 +187,8 @@ __test_and_set_bit(unsigned long nr, volatile unsigned long *ptr)
184 return (ch >> (nr & 7)) & 1; 187 return (ch >> (nr & 7)) & 1;
185} 188}
186 189
187static inline int 190static inline bool arch___test_and_clear_bit(unsigned long nr,
188__test_and_clear_bit(unsigned long nr, volatile unsigned long *ptr) 191 volatile unsigned long *ptr)
189{ 192{
190 unsigned char *addr = __bitops_byte(nr, ptr); 193 unsigned char *addr = __bitops_byte(nr, ptr);
191 unsigned char ch; 194 unsigned char ch;
@@ -195,8 +198,8 @@ __test_and_clear_bit(unsigned long nr, volatile unsigned long *ptr)
195 return (ch >> (nr & 7)) & 1; 198 return (ch >> (nr & 7)) & 1;
196} 199}
197 200
198static inline int 201static inline bool arch___test_and_change_bit(unsigned long nr,
199__test_and_change_bit(unsigned long nr, volatile unsigned long *ptr) 202 volatile unsigned long *ptr)
200{ 203{
201 unsigned char *addr = __bitops_byte(nr, ptr); 204 unsigned char *addr = __bitops_byte(nr, ptr);
202 unsigned char ch; 205 unsigned char ch;
@@ -206,7 +209,8 @@ __test_and_change_bit(unsigned long nr, volatile unsigned long *ptr)
206 return (ch >> (nr & 7)) & 1; 209 return (ch >> (nr & 7)) & 1;
207} 210}
208 211
209static inline int test_bit(unsigned long nr, const volatile unsigned long *ptr) 212static inline bool arch_test_bit(unsigned long nr,
213 const volatile unsigned long *ptr)
210{ 214{
211 const volatile unsigned char *addr; 215 const volatile unsigned char *addr;
212 216
@@ -215,28 +219,30 @@ static inline int test_bit(unsigned long nr, const volatile unsigned long *ptr)
215 return (*addr >> (nr & 7)) & 1; 219 return (*addr >> (nr & 7)) & 1;
216} 220}
217 221
218static inline int test_and_set_bit_lock(unsigned long nr, 222static inline bool arch_test_and_set_bit_lock(unsigned long nr,
219 volatile unsigned long *ptr) 223 volatile unsigned long *ptr)
220{ 224{
221 if (test_bit(nr, ptr)) 225 if (arch_test_bit(nr, ptr))
222 return 1; 226 return 1;
223 return test_and_set_bit(nr, ptr); 227 return arch_test_and_set_bit(nr, ptr);
224} 228}
225 229
226static inline void clear_bit_unlock(unsigned long nr, 230static inline void arch_clear_bit_unlock(unsigned long nr,
227 volatile unsigned long *ptr) 231 volatile unsigned long *ptr)
228{ 232{
229 smp_mb__before_atomic(); 233 smp_mb__before_atomic();
230 clear_bit(nr, ptr); 234 arch_clear_bit(nr, ptr);
231} 235}
232 236
233static inline void __clear_bit_unlock(unsigned long nr, 237static inline void arch___clear_bit_unlock(unsigned long nr,
234 volatile unsigned long *ptr) 238 volatile unsigned long *ptr)
235{ 239{
236 smp_mb(); 240 smp_mb();
237 __clear_bit(nr, ptr); 241 arch___clear_bit(nr, ptr);
238} 242}
239 243
244#include <asm-generic/bitops-instrumented.h>
245
240/* 246/*
241 * Functions which use MSB0 bit numbering. 247 * Functions which use MSB0 bit numbering.
242 * The bits are numbered: 248 * The bits are numbered:
@@ -261,7 +267,8 @@ static inline void clear_bit_inv(unsigned long nr, volatile unsigned long *ptr)
261 return clear_bit(nr ^ (BITS_PER_LONG - 1), ptr); 267 return clear_bit(nr ^ (BITS_PER_LONG - 1), ptr);
262} 268}
263 269
264static inline int test_and_clear_bit_inv(unsigned long nr, volatile unsigned long *ptr) 270static inline bool test_and_clear_bit_inv(unsigned long nr,
271 volatile unsigned long *ptr)
265{ 272{
266 return test_and_clear_bit(nr ^ (BITS_PER_LONG - 1), ptr); 273 return test_and_clear_bit(nr ^ (BITS_PER_LONG - 1), ptr);
267} 274}
@@ -276,8 +283,8 @@ static inline void __clear_bit_inv(unsigned long nr, volatile unsigned long *ptr
276 return __clear_bit(nr ^ (BITS_PER_LONG - 1), ptr); 283 return __clear_bit(nr ^ (BITS_PER_LONG - 1), ptr);
277} 284}
278 285
279static inline int test_bit_inv(unsigned long nr, 286static inline bool test_bit_inv(unsigned long nr,
280 const volatile unsigned long *ptr) 287 const volatile unsigned long *ptr)
281{ 288{
282 return test_bit(nr ^ (BITS_PER_LONG - 1), ptr); 289 return test_bit(nr ^ (BITS_PER_LONG - 1), ptr);
283} 290}
diff --git a/arch/s390/include/asm/page.h b/arch/s390/include/asm/page.h
index a4d38092530a..823578c6b9e2 100644
--- a/arch/s390/include/asm/page.h
+++ b/arch/s390/include/asm/page.h
@@ -177,6 +177,8 @@ static inline int devmem_is_allowed(unsigned long pfn)
177#define VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE | \ 177#define VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE | \
178 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC) 178 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
179 179
180#define ARCH_ZONE_DMA_BITS 31
181
180#include <asm-generic/memory_model.h> 182#include <asm-generic/memory_model.h>
181#include <asm-generic/getorder.h> 183#include <asm-generic/getorder.h>
182 184
diff --git a/arch/s390/include/asm/qdio.h b/arch/s390/include/asm/qdio.h
index db5ef22c46e4..f647d565bd6d 100644
--- a/arch/s390/include/asm/qdio.h
+++ b/arch/s390/include/asm/qdio.h
@@ -28,7 +28,7 @@
28 * @sliba: storage list information block address 28 * @sliba: storage list information block address
29 * @sla: storage list address 29 * @sla: storage list address
30 * @slsba: storage list state block address 30 * @slsba: storage list state block address
31 * @akey: access key for DLIB 31 * @akey: access key for SLIB
32 * @bkey: access key for SL 32 * @bkey: access key for SL
33 * @ckey: access key for SBALs 33 * @ckey: access key for SBALs
34 * @dkey: access key for SLSB 34 * @dkey: access key for SLSB
@@ -50,11 +50,10 @@ struct qdesfmt0 {
50/** 50/**
51 * struct qdr - queue description record (QDR) 51 * struct qdr - queue description record (QDR)
52 * @qfmt: queue format 52 * @qfmt: queue format
53 * @pfmt: implementation dependent parameter format
54 * @ac: adapter characteristics 53 * @ac: adapter characteristics
55 * @iqdcnt: input queue descriptor count 54 * @iqdcnt: input queue descriptor count
56 * @oqdcnt: output queue descriptor count 55 * @oqdcnt: output queue descriptor count
57 * @iqdsz: inpout queue descriptor size 56 * @iqdsz: input queue descriptor size
58 * @oqdsz: output queue descriptor size 57 * @oqdsz: output queue descriptor size
59 * @qiba: queue information block address 58 * @qiba: queue information block address
60 * @qkey: queue information block key 59 * @qkey: queue information block key
@@ -62,8 +61,7 @@ struct qdesfmt0 {
62 */ 61 */
63struct qdr { 62struct qdr {
64 u32 qfmt : 8; 63 u32 qfmt : 8;
65 u32 pfmt : 8; 64 u32 : 16;
66 u32 : 8;
67 u32 ac : 8; 65 u32 ac : 8;
68 u32 : 8; 66 u32 : 8;
69 u32 iqdcnt : 8; 67 u32 iqdcnt : 8;
@@ -327,6 +325,7 @@ typedef void qdio_handler_t(struct ccw_device *, unsigned int, int,
327 * struct qdio_initialize - qdio initialization data 325 * struct qdio_initialize - qdio initialization data
328 * @cdev: associated ccw device 326 * @cdev: associated ccw device
329 * @q_format: queue format 327 * @q_format: queue format
328 * @qdr_ac: feature flags to set
330 * @adapter_name: name for the adapter 329 * @adapter_name: name for the adapter
331 * @qib_param_field_format: format for qib_parm_field 330 * @qib_param_field_format: format for qib_parm_field
332 * @qib_param_field: pointer to 128 bytes or NULL, if no param field 331 * @qib_param_field: pointer to 128 bytes or NULL, if no param field
@@ -338,6 +337,7 @@ typedef void qdio_handler_t(struct ccw_device *, unsigned int, int,
338 * @input_handler: handler to be called for input queues 337 * @input_handler: handler to be called for input queues
339 * @output_handler: handler to be called for output queues 338 * @output_handler: handler to be called for output queues
340 * @queue_start_poll_array: polling handlers (one per input queue or NULL) 339 * @queue_start_poll_array: polling handlers (one per input queue or NULL)
340 * @scan_threshold: # of in-use buffers that triggers scan on output queue
341 * @int_parm: interruption parameter 341 * @int_parm: interruption parameter
342 * @input_sbal_addr_array: address of no_input_qs * 128 pointers 342 * @input_sbal_addr_array: address of no_input_qs * 128 pointers
343 * @output_sbal_addr_array: address of no_output_qs * 128 pointers 343 * @output_sbal_addr_array: address of no_output_qs * 128 pointers
diff --git a/arch/s390/include/asm/setup.h b/arch/s390/include/asm/setup.h
index 82deb8fc8319..70bd65724ec4 100644
--- a/arch/s390/include/asm/setup.h
+++ b/arch/s390/include/asm/setup.h
@@ -54,6 +54,7 @@
54#define INITRD_SIZE_OFFSET 0x10410 54#define INITRD_SIZE_OFFSET 0x10410
55#define OLDMEM_BASE_OFFSET 0x10418 55#define OLDMEM_BASE_OFFSET 0x10418
56#define OLDMEM_SIZE_OFFSET 0x10420 56#define OLDMEM_SIZE_OFFSET 0x10420
57#define KERNEL_VERSION_OFFSET 0x10428
57#define COMMAND_LINE_OFFSET 0x10480 58#define COMMAND_LINE_OFFSET 0x10480
58 59
59#ifndef __ASSEMBLY__ 60#ifndef __ASSEMBLY__
@@ -74,7 +75,8 @@ struct parmarea {
74 unsigned long initrd_size; /* 0x10410 */ 75 unsigned long initrd_size; /* 0x10410 */
75 unsigned long oldmem_base; /* 0x10418 */ 76 unsigned long oldmem_base; /* 0x10418 */
76 unsigned long oldmem_size; /* 0x10420 */ 77 unsigned long oldmem_size; /* 0x10420 */
77 char pad1[0x10480 - 0x10428]; /* 0x10428 - 0x10480 */ 78 unsigned long kernel_version; /* 0x10428 */
79 char pad1[0x10480 - 0x10430]; /* 0x10430 - 0x10480 */
78 char command_line[ARCH_COMMAND_LINE_SIZE]; /* 0x10480 */ 80 char command_line[ARCH_COMMAND_LINE_SIZE]; /* 0x10480 */
79}; 81};
80 82
@@ -82,6 +84,7 @@ extern int noexec_disabled;
82extern int memory_end_set; 84extern int memory_end_set;
83extern unsigned long memory_end; 85extern unsigned long memory_end;
84extern unsigned long max_physmem_end; 86extern unsigned long max_physmem_end;
87extern unsigned long __swsusp_reset_dma;
85 88
86#define MACHINE_IS_VM (S390_lowcore.machine_flags & MACHINE_FLAG_VM) 89#define MACHINE_IS_VM (S390_lowcore.machine_flags & MACHINE_FLAG_VM)
87#define MACHINE_IS_KVM (S390_lowcore.machine_flags & MACHINE_FLAG_KVM) 90#define MACHINE_IS_KVM (S390_lowcore.machine_flags & MACHINE_FLAG_KVM)
diff --git a/arch/s390/include/asm/unistd.h b/arch/s390/include/asm/unistd.h
index b6755685c7b8..9e9f75ef046a 100644
--- a/arch/s390/include/asm/unistd.h
+++ b/arch/s390/include/asm/unistd.h
@@ -34,5 +34,6 @@
34#define __ARCH_WANT_SYS_FORK 34#define __ARCH_WANT_SYS_FORK
35#define __ARCH_WANT_SYS_VFORK 35#define __ARCH_WANT_SYS_VFORK
36#define __ARCH_WANT_SYS_CLONE 36#define __ARCH_WANT_SYS_CLONE
37#define __ARCH_WANT_SYS_CLONE3
37 38
38#endif /* _ASM_S390_UNISTD_H_ */ 39#endif /* _ASM_S390_UNISTD_H_ */
diff --git a/arch/s390/include/uapi/asm/bpf_perf_event.h b/arch/s390/include/uapi/asm/bpf_perf_event.h
index cefe7c7cd4f6..3ed42ff6da94 100644
--- a/arch/s390/include/uapi/asm/bpf_perf_event.h
+++ b/arch/s390/include/uapi/asm/bpf_perf_event.h
@@ -1,4 +1,4 @@
1/* SPDX-License-Identifier: GPL-2.0 */ 1/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
2#ifndef _UAPI__ASM_BPF_PERF_EVENT_H__ 2#ifndef _UAPI__ASM_BPF_PERF_EVENT_H__
3#define _UAPI__ASM_BPF_PERF_EVENT_H__ 3#define _UAPI__ASM_BPF_PERF_EVENT_H__
4 4
diff --git a/arch/s390/include/uapi/asm/ipl.h b/arch/s390/include/uapi/asm/ipl.h
index fd32b1cd80d2..451ba7d08905 100644
--- a/arch/s390/include/uapi/asm/ipl.h
+++ b/arch/s390/include/uapi/asm/ipl.h
@@ -1,4 +1,4 @@
1/* SPDX-License-Identifier: GPL-2.0 */ 1/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
2#ifndef _ASM_S390_UAPI_IPL_H 2#ifndef _ASM_S390_UAPI_IPL_H
3#define _ASM_S390_UAPI_IPL_H 3#define _ASM_S390_UAPI_IPL_H
4 4
diff --git a/arch/s390/include/uapi/asm/zcrypt.h b/arch/s390/include/uapi/asm/zcrypt.h
index 494c34c50716..8c5755f41dde 100644
--- a/arch/s390/include/uapi/asm/zcrypt.h
+++ b/arch/s390/include/uapi/asm/zcrypt.h
@@ -20,6 +20,7 @@
20 20
21#include <linux/ioctl.h> 21#include <linux/ioctl.h>
22#include <linux/compiler.h> 22#include <linux/compiler.h>
23#include <linux/types.h>
23 24
24/* Name of the zcrypt device driver. */ 25/* Name of the zcrypt device driver. */
25#define ZCRYPT_NAME "zcrypt" 26#define ZCRYPT_NAME "zcrypt"
@@ -160,17 +161,17 @@ struct ica_xcRB {
160 * @payload_len: Payload length 161 * @payload_len: Payload length
161 */ 162 */
162struct ep11_cprb { 163struct ep11_cprb {
163 uint16_t cprb_len; 164 __u16 cprb_len;
164 unsigned char cprb_ver_id; 165 unsigned char cprb_ver_id;
165 unsigned char pad_000[2]; 166 unsigned char pad_000[2];
166 unsigned char flags; 167 unsigned char flags;
167 unsigned char func_id[2]; 168 unsigned char func_id[2];
168 uint32_t source_id; 169 __u32 source_id;
169 uint32_t target_id; 170 __u32 target_id;
170 uint32_t ret_code; 171 __u32 ret_code;
171 uint32_t reserved1; 172 __u32 reserved1;
172 uint32_t reserved2; 173 __u32 reserved2;
173 uint32_t payload_len; 174 __u32 payload_len;
174} __attribute__((packed)); 175} __attribute__((packed));
175 176
176/** 177/**
@@ -179,8 +180,8 @@ struct ep11_cprb {
179 * @dom_id: Usage domain id 180 * @dom_id: Usage domain id
180 */ 181 */
181struct ep11_target_dev { 182struct ep11_target_dev {
182 uint16_t ap_id; 183 __u16 ap_id;
183 uint16_t dom_id; 184 __u16 dom_id;
184}; 185};
185 186
186/** 187/**
@@ -195,14 +196,14 @@ struct ep11_target_dev {
195 * @resp: Addr to response block 196 * @resp: Addr to response block
196 */ 197 */
197struct ep11_urb { 198struct ep11_urb {
198 uint16_t targets_num; 199 __u16 targets_num;
199 uint64_t targets; 200 __u64 targets;
200 uint64_t weight; 201 __u64 weight;
201 uint64_t req_no; 202 __u64 req_no;
202 uint64_t req_len; 203 __u64 req_len;
203 uint64_t req; 204 __u64 req;
204 uint64_t resp_len; 205 __u64 resp_len;
205 uint64_t resp; 206 __u64 resp;
206} __attribute__((packed)); 207} __attribute__((packed));
207 208
208/** 209/**
diff --git a/arch/s390/kernel/dumpstack.c b/arch/s390/kernel/dumpstack.c
index ac06c3949ab3..34bdc60c0b11 100644
--- a/arch/s390/kernel/dumpstack.c
+++ b/arch/s390/kernel/dumpstack.c
@@ -114,12 +114,8 @@ recursion_check:
114 * If it comes up a second time then there's something wrong going on: 114 * If it comes up a second time then there's something wrong going on:
115 * just break out and report an unknown stack type. 115 * just break out and report an unknown stack type.
116 */ 116 */
117 if (*visit_mask & (1UL << info->type)) { 117 if (*visit_mask & (1UL << info->type))
118 printk_deferred_once(KERN_WARNING
119 "WARNING: stack recursion on stack type %d\n",
120 info->type);
121 goto unknown; 118 goto unknown;
122 }
123 *visit_mask |= 1UL << info->type; 119 *visit_mask |= 1UL << info->type;
124 return 0; 120 return 0;
125unknown: 121unknown:
diff --git a/arch/s390/kernel/head64.S b/arch/s390/kernel/head64.S
index 5aea1a527443..f384a18e6c26 100644
--- a/arch/s390/kernel/head64.S
+++ b/arch/s390/kernel/head64.S
@@ -60,12 +60,5 @@ ENTRY(startup_continue)
60 60
61 .align 16 61 .align 16
62.LPG1: 62.LPG1:
63.Lpcmsk:.quad 0x0000000180000000
64.L4malign:.quad 0xffffffffffc00000
65.Lscan2g:.quad 0x80000000 + 0x20000 - 8 # 2GB + 128K - 8
66.Lnop: .long 0x07000700
67.Lparmaddr:
68 .quad PARMAREA
69 .align 64
70.Ldw: .quad 0x0002000180000000,0x0000000000000000 63.Ldw: .quad 0x0002000180000000,0x0000000000000000
71.Laregs:.long 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0 64.Laregs:.long 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0
diff --git a/arch/s390/kernel/ipl.c b/arch/s390/kernel/ipl.c
index 2c0a515428d6..6837affc19e8 100644
--- a/arch/s390/kernel/ipl.c
+++ b/arch/s390/kernel/ipl.c
@@ -31,7 +31,6 @@
31#include <asm/os_info.h> 31#include <asm/os_info.h>
32#include <asm/sections.h> 32#include <asm/sections.h>
33#include <asm/boot_data.h> 33#include <asm/boot_data.h>
34#include <asm/uv.h>
35#include "entry.h" 34#include "entry.h"
36 35
37#define IPL_PARM_BLOCK_VERSION 0 36#define IPL_PARM_BLOCK_VERSION 0
@@ -892,21 +891,15 @@ static void __reipl_run(void *unused)
892{ 891{
893 switch (reipl_type) { 892 switch (reipl_type) {
894 case IPL_TYPE_CCW: 893 case IPL_TYPE_CCW:
895 uv_set_shared(__pa(reipl_block_ccw));
896 diag308(DIAG308_SET, reipl_block_ccw); 894 diag308(DIAG308_SET, reipl_block_ccw);
897 uv_remove_shared(__pa(reipl_block_ccw));
898 diag308(DIAG308_LOAD_CLEAR, NULL); 895 diag308(DIAG308_LOAD_CLEAR, NULL);
899 break; 896 break;
900 case IPL_TYPE_FCP: 897 case IPL_TYPE_FCP:
901 uv_set_shared(__pa(reipl_block_fcp));
902 diag308(DIAG308_SET, reipl_block_fcp); 898 diag308(DIAG308_SET, reipl_block_fcp);
903 uv_remove_shared(__pa(reipl_block_fcp));
904 diag308(DIAG308_LOAD_CLEAR, NULL); 899 diag308(DIAG308_LOAD_CLEAR, NULL);
905 break; 900 break;
906 case IPL_TYPE_NSS: 901 case IPL_TYPE_NSS:
907 uv_set_shared(__pa(reipl_block_nss));
908 diag308(DIAG308_SET, reipl_block_nss); 902 diag308(DIAG308_SET, reipl_block_nss);
909 uv_remove_shared(__pa(reipl_block_nss));
910 diag308(DIAG308_LOAD_CLEAR, NULL); 903 diag308(DIAG308_LOAD_CLEAR, NULL);
911 break; 904 break;
912 case IPL_TYPE_UNKNOWN: 905 case IPL_TYPE_UNKNOWN:
@@ -1176,9 +1169,7 @@ static struct kset *dump_kset;
1176 1169
1177static void diag308_dump(void *dump_block) 1170static void diag308_dump(void *dump_block)
1178{ 1171{
1179 uv_set_shared(__pa(dump_block));
1180 diag308(DIAG308_SET, dump_block); 1172 diag308(DIAG308_SET, dump_block);
1181 uv_remove_shared(__pa(dump_block));
1182 while (1) { 1173 while (1) {
1183 if (diag308(DIAG308_LOAD_NORMAL_DUMP, NULL) != 0x302) 1174 if (diag308(DIAG308_LOAD_NORMAL_DUMP, NULL) != 0x302)
1184 break; 1175 break;
diff --git a/arch/s390/kernel/machine_kexec_reloc.c b/arch/s390/kernel/machine_kexec_reloc.c
index 1dded39239f8..3b664cb3ec4d 100644
--- a/arch/s390/kernel/machine_kexec_reloc.c
+++ b/arch/s390/kernel/machine_kexec_reloc.c
@@ -1,5 +1,6 @@
1// SPDX-License-Identifier: GPL-2.0 1// SPDX-License-Identifier: GPL-2.0
2#include <linux/elf.h> 2#include <linux/elf.h>
3#include <asm/kexec.h>
3 4
4int arch_kexec_do_relocs(int r_type, void *loc, unsigned long val, 5int arch_kexec_do_relocs(int r_type, void *loc, unsigned long val,
5 unsigned long addr) 6 unsigned long addr)
diff --git a/arch/s390/kernel/perf_cpum_cf_diag.c b/arch/s390/kernel/perf_cpum_cf_diag.c
index d4e031f7b9c8..5f1fd1581330 100644
--- a/arch/s390/kernel/perf_cpum_cf_diag.c
+++ b/arch/s390/kernel/perf_cpum_cf_diag.c
@@ -34,7 +34,7 @@ struct cf_diag_csd { /* Counter set data per CPU */
34 unsigned char start[PAGE_SIZE]; /* Counter set at event start */ 34 unsigned char start[PAGE_SIZE]; /* Counter set at event start */
35 unsigned char data[PAGE_SIZE]; /* Counter set at event delete */ 35 unsigned char data[PAGE_SIZE]; /* Counter set at event delete */
36}; 36};
37DEFINE_PER_CPU(struct cf_diag_csd, cf_diag_csd); 37static DEFINE_PER_CPU(struct cf_diag_csd, cf_diag_csd);
38 38
39/* Counter sets are stored as data stream in a page sized memory buffer and 39/* Counter sets are stored as data stream in a page sized memory buffer and
40 * exported to user space via raw data attached to the event sample data. 40 * exported to user space via raw data attached to the event sample data.
diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
index 2b94b0ad3588..253177900950 100644
--- a/arch/s390/kernel/setup.c
+++ b/arch/s390/kernel/setup.c
@@ -1114,8 +1114,7 @@ void __init setup_arch(char **cmdline_p)
1114 1114
1115 ROOT_DEV = Root_RAM0; 1115 ROOT_DEV = Root_RAM0;
1116 1116
1117 /* Is init_mm really needed? */ 1117 init_mm.start_code = (unsigned long) _text;
1118 init_mm.start_code = PAGE_OFFSET;
1119 init_mm.end_code = (unsigned long) _etext; 1118 init_mm.end_code = (unsigned long) _etext;
1120 init_mm.end_data = (unsigned long) _edata; 1119 init_mm.end_data = (unsigned long) _edata;
1121 init_mm.brk = (unsigned long) _end; 1120 init_mm.brk = (unsigned long) _end;
diff --git a/arch/s390/kernel/syscalls/syscall.tbl b/arch/s390/kernel/syscalls/syscall.tbl
index a90d3e945445..3054e9c035a3 100644
--- a/arch/s390/kernel/syscalls/syscall.tbl
+++ b/arch/s390/kernel/syscalls/syscall.tbl
@@ -437,4 +437,4 @@
437432 common fsmount sys_fsmount sys_fsmount 437432 common fsmount sys_fsmount sys_fsmount
438433 common fspick sys_fspick sys_fspick 438433 common fspick sys_fspick sys_fspick
439434 common pidfd_open sys_pidfd_open sys_pidfd_open 439434 common pidfd_open sys_pidfd_open sys_pidfd_open
440# 435 reserved for clone3 440435 common clone3 sys_clone3 sys_clone3
diff --git a/arch/s390/kernel/vdso.c b/arch/s390/kernel/vdso.c
index 243d8b1185bf..c6bc190f3c28 100644
--- a/arch/s390/kernel/vdso.c
+++ b/arch/s390/kernel/vdso.c
@@ -216,11 +216,6 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
216 216
217 if (!vdso_enabled) 217 if (!vdso_enabled)
218 return 0; 218 return 0;
219 /*
220 * Only map the vdso for dynamically linked elf binaries.
221 */
222 if (!uses_interp)
223 return 0;
224 219
225 vdso_pages = vdso64_pages; 220 vdso_pages = vdso64_pages;
226#ifdef CONFIG_COMPAT_VDSO 221#ifdef CONFIG_COMPAT_VDSO
diff --git a/arch/s390/kernel/vmlinux.lds.S b/arch/s390/kernel/vmlinux.lds.S
index 49d55327de0b..7e0eb4020917 100644
--- a/arch/s390/kernel/vmlinux.lds.S
+++ b/arch/s390/kernel/vmlinux.lds.S
@@ -32,10 +32,9 @@ PHDRS {
32SECTIONS 32SECTIONS
33{ 33{
34 . = 0x100000; 34 . = 0x100000;
35 _stext = .; /* Start of text section */
36 .text : { 35 .text : {
37 /* Text and read-only data */ 36 _stext = .; /* Start of text section */
38 _text = .; 37 _text = .; /* Text and read-only data */
39 HEAD_TEXT 38 HEAD_TEXT
40 TEXT_TEXT 39 TEXT_TEXT
41 SCHED_TEXT 40 SCHED_TEXT
@@ -47,11 +46,10 @@ SECTIONS
47 *(.text.*_indirect_*) 46 *(.text.*_indirect_*)
48 *(.fixup) 47 *(.fixup)
49 *(.gnu.warning) 48 *(.gnu.warning)
49 . = ALIGN(PAGE_SIZE);
50 _etext = .; /* End of text section */
50 } :text = 0x0700 51 } :text = 0x0700
51 52
52 . = ALIGN(PAGE_SIZE);
53 _etext = .; /* End of text section */
54
55 NOTES :text :note 53 NOTES :text :note
56 54
57 .dummy : { *(.dummy) } :data 55 .dummy : { *(.dummy) } :data
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index 3f520cd837fb..f329dcb3f44c 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -2516,16 +2516,6 @@ out_err:
2516 return rc; 2516 return rc;
2517} 2517}
2518 2518
2519bool kvm_arch_has_vcpu_debugfs(void)
2520{
2521 return false;
2522}
2523
2524int kvm_arch_create_vcpu_debugfs(struct kvm_vcpu *vcpu)
2525{
2526 return 0;
2527}
2528
2529void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu) 2519void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
2530{ 2520{
2531 VCPU_EVENT(vcpu, 3, "%s", "free cpu"); 2521 VCPU_EVENT(vcpu, 3, "%s", "free cpu");
diff --git a/arch/s390/lib/xor.c b/arch/s390/lib/xor.c
index 96580590ccaf..29d9470dbceb 100644
--- a/arch/s390/lib/xor.c
+++ b/arch/s390/lib/xor.c
@@ -9,6 +9,7 @@
9#include <linux/types.h> 9#include <linux/types.h>
10#include <linux/export.h> 10#include <linux/export.h>
11#include <linux/raid/xor.h> 11#include <linux/raid/xor.h>
12#include <asm/xor.h>
12 13
13static void xor_xc_2(unsigned long bytes, unsigned long *p1, unsigned long *p2) 14static void xor_xc_2(unsigned long bytes, unsigned long *p1, unsigned long *p2)
14{ 15{
diff --git a/arch/s390/mm/dump_pagetables.c b/arch/s390/mm/dump_pagetables.c
index 3b93ba0b5d8d..5d67b81c704a 100644
--- a/arch/s390/mm/dump_pagetables.c
+++ b/arch/s390/mm/dump_pagetables.c
@@ -161,9 +161,9 @@ static void walk_pmd_level(struct seq_file *m, struct pg_state *st,
161 } 161 }
162#endif 162#endif
163 163
164 for (i = 0; i < PTRS_PER_PMD && addr < max_addr; i++) { 164 pmd = pmd_offset(pud, addr);
165 for (i = 0; i < PTRS_PER_PMD && addr < max_addr; i++, pmd++) {
165 st->current_address = addr; 166 st->current_address = addr;
166 pmd = pmd_offset(pud, addr);
167 if (!pmd_none(*pmd)) { 167 if (!pmd_none(*pmd)) {
168 if (pmd_large(*pmd)) { 168 if (pmd_large(*pmd)) {
169 prot = pmd_val(*pmd) & 169 prot = pmd_val(*pmd) &
@@ -192,9 +192,9 @@ static void walk_pud_level(struct seq_file *m, struct pg_state *st,
192 } 192 }
193#endif 193#endif
194 194
195 for (i = 0; i < PTRS_PER_PUD && addr < max_addr; i++) { 195 pud = pud_offset(p4d, addr);
196 for (i = 0; i < PTRS_PER_PUD && addr < max_addr; i++, pud++) {
196 st->current_address = addr; 197 st->current_address = addr;
197 pud = pud_offset(p4d, addr);
198 if (!pud_none(*pud)) 198 if (!pud_none(*pud))
199 if (pud_large(*pud)) { 199 if (pud_large(*pud)) {
200 prot = pud_val(*pud) & 200 prot = pud_val(*pud) &
@@ -222,9 +222,9 @@ static void walk_p4d_level(struct seq_file *m, struct pg_state *st,
222 } 222 }
223#endif 223#endif
224 224
225 for (i = 0; i < PTRS_PER_P4D && addr < max_addr; i++) { 225 p4d = p4d_offset(pgd, addr);
226 for (i = 0; i < PTRS_PER_P4D && addr < max_addr; i++, p4d++) {
226 st->current_address = addr; 227 st->current_address = addr;
227 p4d = p4d_offset(pgd, addr);
228 if (!p4d_none(*p4d)) 228 if (!p4d_none(*p4d))
229 walk_pud_level(m, st, p4d, addr); 229 walk_pud_level(m, st, p4d, addr);
230 else 230 else
diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c
index 63507662828f..7b0bb475c166 100644
--- a/arch/s390/mm/fault.c
+++ b/arch/s390/mm/fault.c
@@ -327,6 +327,7 @@ static noinline void do_fault_error(struct pt_regs *regs, int access,
327 case VM_FAULT_BADACCESS: 327 case VM_FAULT_BADACCESS:
328 if (access == VM_EXEC && signal_return(regs) == 0) 328 if (access == VM_EXEC && signal_return(regs) == 0)
329 break; 329 break;
330 /* fallthrough */
330 case VM_FAULT_BADMAP: 331 case VM_FAULT_BADMAP:
331 /* Bad memory access. Check if it is kernel or user space. */ 332 /* Bad memory access. Check if it is kernel or user space. */
332 if (user_mode(regs)) { 333 if (user_mode(regs)) {
@@ -336,7 +337,9 @@ static noinline void do_fault_error(struct pt_regs *regs, int access,
336 do_sigsegv(regs, si_code); 337 do_sigsegv(regs, si_code);
337 break; 338 break;
338 } 339 }
340 /* fallthrough */
339 case VM_FAULT_BADCONTEXT: 341 case VM_FAULT_BADCONTEXT:
342 /* fallthrough */
340 case VM_FAULT_PFAULT: 343 case VM_FAULT_PFAULT:
341 do_no_context(regs); 344 do_no_context(regs);
342 break; 345 break;
diff --git a/arch/s390/mm/gmap.c b/arch/s390/mm/gmap.c
index 1e668b95e0c6..39c3a6e3d262 100644
--- a/arch/s390/mm/gmap.c
+++ b/arch/s390/mm/gmap.c
@@ -2424,8 +2424,8 @@ EXPORT_SYMBOL_GPL(gmap_pmdp_idte_global);
2424 * This function is assumed to be called with the guest_table_lock 2424 * This function is assumed to be called with the guest_table_lock
2425 * held. 2425 * held.
2426 */ 2426 */
2427bool gmap_test_and_clear_dirty_pmd(struct gmap *gmap, pmd_t *pmdp, 2427static bool gmap_test_and_clear_dirty_pmd(struct gmap *gmap, pmd_t *pmdp,
2428 unsigned long gaddr) 2428 unsigned long gaddr)
2429{ 2429{
2430 if (pmd_val(*pmdp) & _SEGMENT_ENTRY_INVALID) 2430 if (pmd_val(*pmdp) & _SEGMENT_ENTRY_INVALID)
2431 return false; 2431 return false;
diff --git a/arch/s390/mm/pgalloc.c b/arch/s390/mm/pgalloc.c
index 99e06213a22b..54fcdf66ae96 100644
--- a/arch/s390/mm/pgalloc.c
+++ b/arch/s390/mm/pgalloc.c
@@ -17,8 +17,6 @@
17 17
18#ifdef CONFIG_PGSTE 18#ifdef CONFIG_PGSTE
19 19
20static int page_table_allocate_pgste_min = 0;
21static int page_table_allocate_pgste_max = 1;
22int page_table_allocate_pgste = 0; 20int page_table_allocate_pgste = 0;
23EXPORT_SYMBOL(page_table_allocate_pgste); 21EXPORT_SYMBOL(page_table_allocate_pgste);
24 22
@@ -29,8 +27,8 @@ static struct ctl_table page_table_sysctl[] = {
29 .maxlen = sizeof(int), 27 .maxlen = sizeof(int),
30 .mode = S_IRUGO | S_IWUSR, 28 .mode = S_IRUGO | S_IWUSR,
31 .proc_handler = proc_dointvec_minmax, 29 .proc_handler = proc_dointvec_minmax,
32 .extra1 = &page_table_allocate_pgste_min, 30 .extra1 = SYSCTL_ZERO,
33 .extra2 = &page_table_allocate_pgste_max, 31 .extra2 = SYSCTL_ONE,
34 }, 32 },
35 { } 33 { }
36}; 34};
diff --git a/arch/s390/net/bpf_jit_comp.c b/arch/s390/net/bpf_jit_comp.c
index e636728ab452..955eb355c2fd 100644
--- a/arch/s390/net/bpf_jit_comp.c
+++ b/arch/s390/net/bpf_jit_comp.c
@@ -863,7 +863,7 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp, int i
863 break; 863 break;
864 case BPF_ALU64 | BPF_NEG: /* dst = -dst */ 864 case BPF_ALU64 | BPF_NEG: /* dst = -dst */
865 /* lcgr %dst,%dst */ 865 /* lcgr %dst,%dst */
866 EMIT4(0xb9130000, dst_reg, dst_reg); 866 EMIT4(0xb9030000, dst_reg, dst_reg);
867 break; 867 break;
868 /* 868 /*
869 * BPF_FROM_BE/LE 869 * BPF_FROM_BE/LE
@@ -1049,8 +1049,8 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp, int i
1049 /* llgf %w1,map.max_entries(%b2) */ 1049 /* llgf %w1,map.max_entries(%b2) */
1050 EMIT6_DISP_LH(0xe3000000, 0x0016, REG_W1, REG_0, BPF_REG_2, 1050 EMIT6_DISP_LH(0xe3000000, 0x0016, REG_W1, REG_0, BPF_REG_2,
1051 offsetof(struct bpf_array, map.max_entries)); 1051 offsetof(struct bpf_array, map.max_entries));
1052 /* clgrj %b3,%w1,0xa,label0: if %b3 >= %w1 goto out */ 1052 /* clrj %b3,%w1,0xa,label0: if (u32)%b3 >= (u32)%w1 goto out */
1053 EMIT6_PCREL_LABEL(0xec000000, 0x0065, BPF_REG_3, 1053 EMIT6_PCREL_LABEL(0xec000000, 0x0077, BPF_REG_3,
1054 REG_W1, 0, 0xa); 1054 REG_W1, 0, 0xa);
1055 1055
1056 /* 1056 /*
@@ -1076,8 +1076,10 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp, int i
1076 * goto out; 1076 * goto out;
1077 */ 1077 */
1078 1078
1079 /* sllg %r1,%b3,3: %r1 = index * 8 */ 1079 /* llgfr %r1,%b3: %r1 = (u32) index */
1080 EMIT6_DISP_LH(0xeb000000, 0x000d, REG_1, BPF_REG_3, REG_0, 3); 1080 EMIT4(0xb9160000, REG_1, BPF_REG_3);
1081 /* sllg %r1,%r1,3: %r1 *= 8 */
1082 EMIT6_DISP_LH(0xeb000000, 0x000d, REG_1, REG_1, REG_0, 3);
1081 /* lg %r1,prog(%b2,%r1) */ 1083 /* lg %r1,prog(%b2,%r1) */
1082 EMIT6_DISP_LH(0xe3000000, 0x0004, REG_1, BPF_REG_2, 1084 EMIT6_DISP_LH(0xe3000000, 0x0004, REG_1, BPF_REG_2,
1083 REG_1, offsetof(struct bpf_array, ptrs)); 1085 REG_1, offsetof(struct bpf_array, ptrs));
diff --git a/arch/s390/scripts/Makefile.chkbss b/arch/s390/scripts/Makefile.chkbss
index 884a9caff5fb..f4f4c2c6dee9 100644
--- a/arch/s390/scripts/Makefile.chkbss
+++ b/arch/s390/scripts/Makefile.chkbss
@@ -11,8 +11,7 @@ chkbss: $(addprefix $(obj)/, $(chkbss-files))
11 11
12quiet_cmd_chkbss = CHKBSS $< 12quiet_cmd_chkbss = CHKBSS $<
13 cmd_chkbss = \ 13 cmd_chkbss = \
14 if $(OBJDUMP) -h $< | grep -q "\.bss" && \ 14 if ! $(OBJSIZE) --common $< | $(AWK) 'END { if ($$3) exit 1 }'; then \
15 ! $(OBJDUMP) -j .bss -w -h $< | awk 'END { if ($$3) exit 1 }'; then \
16 echo "error: $< .bss section is not empty" >&2; exit 1; \ 15 echo "error: $< .bss section is not empty" >&2; exit 1; \
17 fi; \ 16 fi; \
18 touch $@; 17 touch $@;
diff --git a/arch/sh/include/uapi/asm/setup.h b/arch/sh/include/uapi/asm/setup.h
index 1170dd2fb998..4bd19f80f9b0 100644
--- a/arch/sh/include/uapi/asm/setup.h
+++ b/arch/sh/include/uapi/asm/setup.h
@@ -1,2 +1,2 @@
1/* SPDX-License-Identifier: GPL-2.0 */ 1/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
2#include <asm-generic/setup.h> 2#include <asm-generic/setup.h>
diff --git a/arch/sh/include/uapi/asm/types.h b/arch/sh/include/uapi/asm/types.h
index f83795fdc0da..68100e108ea6 100644
--- a/arch/sh/include/uapi/asm/types.h
+++ b/arch/sh/include/uapi/asm/types.h
@@ -1,2 +1,2 @@
1/* SPDX-License-Identifier: GPL-2.0 */ 1/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
2#include <asm-generic/types.h> 2#include <asm-generic/types.h>
diff --git a/arch/sh/kernel/disassemble.c b/arch/sh/kernel/disassemble.c
index defebf1a9c8a..845543780cc5 100644
--- a/arch/sh/kernel/disassemble.c
+++ b/arch/sh/kernel/disassemble.c
@@ -475,8 +475,6 @@ static void print_sh_insn(u32 memaddr, u16 insn)
475 printk("dbr"); 475 printk("dbr");
476 break; 476 break;
477 case FD_REG_N: 477 case FD_REG_N:
478 if (0)
479 goto d_reg_n;
480 case F_REG_N: 478 case F_REG_N:
481 printk("fr%d", rn); 479 printk("fr%d", rn);
482 break; 480 break;
@@ -488,7 +486,7 @@ static void print_sh_insn(u32 memaddr, u16 insn)
488 printk("xd%d", rn & ~1); 486 printk("xd%d", rn & ~1);
489 break; 487 break;
490 } 488 }
491 d_reg_n: 489 /* else, fall through */
492 case D_REG_N: 490 case D_REG_N:
493 printk("dr%d", rn); 491 printk("dr%d", rn);
494 break; 492 break;
@@ -497,6 +495,7 @@ static void print_sh_insn(u32 memaddr, u16 insn)
497 printk("xd%d", rm & ~1); 495 printk("xd%d", rm & ~1);
498 break; 496 break;
499 } 497 }
498 /* else, fall through */
500 case D_REG_M: 499 case D_REG_M:
501 printk("dr%d", rm); 500 printk("dr%d", rm);
502 break; 501 break;
diff --git a/arch/sh/kernel/hw_breakpoint.c b/arch/sh/kernel/hw_breakpoint.c
index 3bd010b4c55f..f10d64311127 100644
--- a/arch/sh/kernel/hw_breakpoint.c
+++ b/arch/sh/kernel/hw_breakpoint.c
@@ -157,6 +157,7 @@ int arch_bp_generic_fields(int sh_len, int sh_type,
157 switch (sh_type) { 157 switch (sh_type) {
158 case SH_BREAKPOINT_READ: 158 case SH_BREAKPOINT_READ:
159 *gen_type = HW_BREAKPOINT_R; 159 *gen_type = HW_BREAKPOINT_R;
160 break;
160 case SH_BREAKPOINT_WRITE: 161 case SH_BREAKPOINT_WRITE:
161 *gen_type = HW_BREAKPOINT_W; 162 *gen_type = HW_BREAKPOINT_W;
162 break; 163 break;
diff --git a/arch/sparc/include/uapi/asm/oradax.h b/arch/sparc/include/uapi/asm/oradax.h
index 64c67f2ea33f..0dace69058ab 100644
--- a/arch/sparc/include/uapi/asm/oradax.h
+++ b/arch/sparc/include/uapi/asm/oradax.h
@@ -1,4 +1,4 @@
1/* SPDX-License-Identifier: GPL-2.0-or-later */ 1/* SPDX-License-Identifier: GPL-2.0-or-later WITH Linux-syscall-note */
2/* 2/*
3 * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved. 3 * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
4 */ 4 */
diff --git a/arch/um/include/shared/timer-internal.h b/arch/um/include/shared/timer-internal.h
index 8574338bf23b..9991ec2371e4 100644
--- a/arch/um/include/shared/timer-internal.h
+++ b/arch/um/include/shared/timer-internal.h
@@ -34,10 +34,13 @@ static inline void time_travel_set_time(unsigned long long ns)
34 time_travel_time = ns; 34 time_travel_time = ns;
35} 35}
36 36
37static inline void time_travel_set_timer(enum time_travel_timer_mode mode, 37static inline void time_travel_set_timer_mode(enum time_travel_timer_mode mode)
38 unsigned long long expiry)
39{ 38{
40 time_travel_timer_mode = mode; 39 time_travel_timer_mode = mode;
40}
41
42static inline void time_travel_set_timer_expiry(unsigned long long expiry)
43{
41 time_travel_timer_expiry = expiry; 44 time_travel_timer_expiry = expiry;
42} 45}
43#else 46#else
@@ -50,8 +53,11 @@ static inline void time_travel_set_time(unsigned long long ns)
50{ 53{
51} 54}
52 55
53static inline void time_travel_set_timer(enum time_travel_timer_mode mode, 56static inline void time_travel_set_timer_mode(enum time_travel_timer_mode mode)
54 unsigned long long expiry) 57{
58}
59
60static inline void time_travel_set_timer_expiry(unsigned long long expiry)
55{ 61{
56} 62}
57 63
diff --git a/arch/um/kernel/process.c b/arch/um/kernel/process.c
index 67c0d1a860e9..6bede7888fc2 100644
--- a/arch/um/kernel/process.c
+++ b/arch/um/kernel/process.c
@@ -213,7 +213,7 @@ static void time_travel_sleep(unsigned long long duration)
213 if (time_travel_timer_mode != TT_TMR_DISABLED || 213 if (time_travel_timer_mode != TT_TMR_DISABLED ||
214 time_travel_timer_expiry < next) { 214 time_travel_timer_expiry < next) {
215 if (time_travel_timer_mode == TT_TMR_ONESHOT) 215 if (time_travel_timer_mode == TT_TMR_ONESHOT)
216 time_travel_set_timer(TT_TMR_DISABLED, 0); 216 time_travel_set_timer_mode(TT_TMR_DISABLED);
217 /* 217 /*
218 * time_travel_time will be adjusted in the timer 218 * time_travel_time will be adjusted in the timer
219 * IRQ handler so it works even when the signal 219 * IRQ handler so it works even when the signal
diff --git a/arch/um/kernel/time.c b/arch/um/kernel/time.c
index 6a051b078359..234757233355 100644
--- a/arch/um/kernel/time.c
+++ b/arch/um/kernel/time.c
@@ -50,7 +50,7 @@ void timer_handler(int sig, struct siginfo *unused_si, struct uml_pt_regs *regs)
50static int itimer_shutdown(struct clock_event_device *evt) 50static int itimer_shutdown(struct clock_event_device *evt)
51{ 51{
52 if (time_travel_mode != TT_MODE_OFF) 52 if (time_travel_mode != TT_MODE_OFF)
53 time_travel_set_timer(TT_TMR_DISABLED, 0); 53 time_travel_set_timer_mode(TT_TMR_DISABLED);
54 54
55 if (time_travel_mode != TT_MODE_INFCPU) 55 if (time_travel_mode != TT_MODE_INFCPU)
56 os_timer_disable(); 56 os_timer_disable();
@@ -62,9 +62,10 @@ static int itimer_set_periodic(struct clock_event_device *evt)
62{ 62{
63 unsigned long long interval = NSEC_PER_SEC / HZ; 63 unsigned long long interval = NSEC_PER_SEC / HZ;
64 64
65 if (time_travel_mode != TT_MODE_OFF) 65 if (time_travel_mode != TT_MODE_OFF) {
66 time_travel_set_timer(TT_TMR_PERIODIC, 66 time_travel_set_timer_mode(TT_TMR_PERIODIC);
67 time_travel_time + interval); 67 time_travel_set_timer_expiry(time_travel_time + interval);
68 }
68 69
69 if (time_travel_mode != TT_MODE_INFCPU) 70 if (time_travel_mode != TT_MODE_INFCPU)
70 os_timer_set_interval(interval); 71 os_timer_set_interval(interval);
@@ -77,9 +78,10 @@ static int itimer_next_event(unsigned long delta,
77{ 78{
78 delta += 1; 79 delta += 1;
79 80
80 if (time_travel_mode != TT_MODE_OFF) 81 if (time_travel_mode != TT_MODE_OFF) {
81 time_travel_set_timer(TT_TMR_ONESHOT, 82 time_travel_set_timer_mode(TT_TMR_ONESHOT);
82 time_travel_time + delta); 83 time_travel_set_timer_expiry(time_travel_time + delta);
84 }
83 85
84 if (time_travel_mode != TT_MODE_INFCPU) 86 if (time_travel_mode != TT_MODE_INFCPU)
85 return os_timer_one_shot(delta); 87 return os_timer_one_shot(delta);
diff --git a/arch/x86/Makefile b/arch/x86/Makefile
index 56e748a7679f..94df0868804b 100644
--- a/arch/x86/Makefile
+++ b/arch/x86/Makefile
@@ -38,6 +38,7 @@ REALMODE_CFLAGS := $(M16_CFLAGS) -g -Os -DDISABLE_BRANCH_PROFILING \
38 38
39REALMODE_CFLAGS += $(call __cc-option, $(CC), $(REALMODE_CFLAGS), -ffreestanding) 39REALMODE_CFLAGS += $(call __cc-option, $(CC), $(REALMODE_CFLAGS), -ffreestanding)
40REALMODE_CFLAGS += $(call __cc-option, $(CC), $(REALMODE_CFLAGS), -fno-stack-protector) 40REALMODE_CFLAGS += $(call __cc-option, $(CC), $(REALMODE_CFLAGS), -fno-stack-protector)
41REALMODE_CFLAGS += $(call __cc-option, $(CC), $(REALMODE_CFLAGS), -Wno-address-of-packed-member)
41REALMODE_CFLAGS += $(call __cc-option, $(CC), $(REALMODE_CFLAGS), $(cc_stack_align4)) 42REALMODE_CFLAGS += $(call __cc-option, $(CC), $(REALMODE_CFLAGS), $(cc_stack_align4))
42export REALMODE_CFLAGS 43export REALMODE_CFLAGS
43 44
diff --git a/arch/x86/boot/compressed/pgtable_64.c b/arch/x86/boot/compressed/pgtable_64.c
index 5f2d03067ae5..c8862696a47b 100644
--- a/arch/x86/boot/compressed/pgtable_64.c
+++ b/arch/x86/boot/compressed/pgtable_64.c
@@ -72,6 +72,8 @@ static unsigned long find_trampoline_placement(void)
72 72
73 /* Find the first usable memory region under bios_start. */ 73 /* Find the first usable memory region under bios_start. */
74 for (i = boot_params->e820_entries - 1; i >= 0; i--) { 74 for (i = boot_params->e820_entries - 1; i >= 0; i--) {
75 unsigned long new = bios_start;
76
75 entry = &boot_params->e820_table[i]; 77 entry = &boot_params->e820_table[i];
76 78
77 /* Skip all entries above bios_start. */ 79 /* Skip all entries above bios_start. */
@@ -84,15 +86,20 @@ static unsigned long find_trampoline_placement(void)
84 86
85 /* Adjust bios_start to the end of the entry if needed. */ 87 /* Adjust bios_start to the end of the entry if needed. */
86 if (bios_start > entry->addr + entry->size) 88 if (bios_start > entry->addr + entry->size)
87 bios_start = entry->addr + entry->size; 89 new = entry->addr + entry->size;
88 90
89 /* Keep bios_start page-aligned. */ 91 /* Keep bios_start page-aligned. */
90 bios_start = round_down(bios_start, PAGE_SIZE); 92 new = round_down(new, PAGE_SIZE);
91 93
92 /* Skip the entry if it's too small. */ 94 /* Skip the entry if it's too small. */
93 if (bios_start - TRAMPOLINE_32BIT_SIZE < entry->addr) 95 if (new - TRAMPOLINE_32BIT_SIZE < entry->addr)
94 continue; 96 continue;
95 97
98 /* Protect against underflow. */
99 if (new - TRAMPOLINE_32BIT_SIZE > bios_start)
100 break;
101
102 bios_start = new;
96 break; 103 break;
97 } 104 }
98 105
diff --git a/arch/x86/boot/string.c b/arch/x86/boot/string.c
index 401e30ca0a75..8272a4492844 100644
--- a/arch/x86/boot/string.c
+++ b/arch/x86/boot/string.c
@@ -37,6 +37,14 @@ int memcmp(const void *s1, const void *s2, size_t len)
37 return diff; 37 return diff;
38} 38}
39 39
40/*
41 * Clang may lower `memcmp == 0` to `bcmp == 0`.
42 */
43int bcmp(const void *s1, const void *s2, size_t len)
44{
45 return memcmp(s1, s2, len);
46}
47
40int strcmp(const char *str1, const char *str2) 48int strcmp(const char *str1, const char *str2)
41{ 49{
42 const unsigned char *s1 = (const unsigned char *)str1; 50 const unsigned char *s1 = (const unsigned char *)str1;
diff --git a/arch/x86/entry/calling.h b/arch/x86/entry/calling.h
index 830bd984182b..515c0ceeb4a3 100644
--- a/arch/x86/entry/calling.h
+++ b/arch/x86/entry/calling.h
@@ -314,6 +314,23 @@ For 32-bit we have the following conventions - kernel is built with
314 314
315#endif 315#endif
316 316
317/*
318 * Mitigate Spectre v1 for conditional swapgs code paths.
319 *
320 * FENCE_SWAPGS_USER_ENTRY is used in the user entry swapgs code path, to
321 * prevent a speculative swapgs when coming from kernel space.
322 *
323 * FENCE_SWAPGS_KERNEL_ENTRY is used in the kernel entry non-swapgs code path,
324 * to prevent the swapgs from getting speculatively skipped when coming from
325 * user space.
326 */
327.macro FENCE_SWAPGS_USER_ENTRY
328 ALTERNATIVE "", "lfence", X86_FEATURE_FENCE_SWAPGS_USER
329.endm
330.macro FENCE_SWAPGS_KERNEL_ENTRY
331 ALTERNATIVE "", "lfence", X86_FEATURE_FENCE_SWAPGS_KERNEL
332.endm
333
317.macro STACKLEAK_ERASE_NOCLOBBER 334.macro STACKLEAK_ERASE_NOCLOBBER
318#ifdef CONFIG_GCC_PLUGIN_STACKLEAK 335#ifdef CONFIG_GCC_PLUGIN_STACKLEAK
319 PUSH_AND_CLEAR_REGS 336 PUSH_AND_CLEAR_REGS
diff --git a/arch/x86/entry/entry_32.S b/arch/x86/entry/entry_32.S
index 2bb986f305ac..4f86928246e7 100644
--- a/arch/x86/entry/entry_32.S
+++ b/arch/x86/entry/entry_32.S
@@ -1443,8 +1443,12 @@ BUILD_INTERRUPT3(hv_stimer0_callback_vector, HYPERV_STIMER0_VECTOR,
1443 1443
1444ENTRY(page_fault) 1444ENTRY(page_fault)
1445 ASM_CLAC 1445 ASM_CLAC
1446 pushl $0; /* %gs's slot on the stack */ 1446 pushl $do_page_fault
1447 jmp common_exception_read_cr2
1448END(page_fault)
1447 1449
1450common_exception_read_cr2:
1451 /* the function address is in %gs's slot on the stack */
1448 SAVE_ALL switch_stacks=1 skip_gs=1 1452 SAVE_ALL switch_stacks=1 skip_gs=1
1449 1453
1450 ENCODE_FRAME_POINTER 1454 ENCODE_FRAME_POINTER
@@ -1452,6 +1456,7 @@ ENTRY(page_fault)
1452 1456
1453 /* fixup %gs */ 1457 /* fixup %gs */
1454 GS_TO_REG %ecx 1458 GS_TO_REG %ecx
1459 movl PT_GS(%esp), %edi
1455 REG_TO_PTGS %ecx 1460 REG_TO_PTGS %ecx
1456 SET_KERNEL_GS %ecx 1461 SET_KERNEL_GS %ecx
1457 1462
@@ -1463,9 +1468,9 @@ ENTRY(page_fault)
1463 1468
1464 TRACE_IRQS_OFF 1469 TRACE_IRQS_OFF
1465 movl %esp, %eax # pt_regs pointer 1470 movl %esp, %eax # pt_regs pointer
1466 call do_page_fault 1471 CALL_NOSPEC %edi
1467 jmp ret_from_exception 1472 jmp ret_from_exception
1468END(page_fault) 1473END(common_exception_read_cr2)
1469 1474
1470common_exception: 1475common_exception:
1471 /* the function address is in %gs's slot on the stack */ 1476 /* the function address is in %gs's slot on the stack */
@@ -1595,7 +1600,7 @@ END(general_protection)
1595ENTRY(async_page_fault) 1600ENTRY(async_page_fault)
1596 ASM_CLAC 1601 ASM_CLAC
1597 pushl $do_async_page_fault 1602 pushl $do_async_page_fault
1598 jmp common_exception 1603 jmp common_exception_read_cr2
1599END(async_page_fault) 1604END(async_page_fault)
1600#endif 1605#endif
1601 1606
diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S
index 3f5a978a02a7..be9ca198c581 100644
--- a/arch/x86/entry/entry_64.S
+++ b/arch/x86/entry/entry_64.S
@@ -519,7 +519,7 @@ ENTRY(interrupt_entry)
519 testb $3, CS-ORIG_RAX+8(%rsp) 519 testb $3, CS-ORIG_RAX+8(%rsp)
520 jz 1f 520 jz 1f
521 SWAPGS 521 SWAPGS
522 522 FENCE_SWAPGS_USER_ENTRY
523 /* 523 /*
524 * Switch to the thread stack. The IRET frame and orig_ax are 524 * Switch to the thread stack. The IRET frame and orig_ax are
525 * on the stack, as well as the return address. RDI..R12 are 525 * on the stack, as well as the return address. RDI..R12 are
@@ -549,8 +549,10 @@ ENTRY(interrupt_entry)
549 UNWIND_HINT_FUNC 549 UNWIND_HINT_FUNC
550 550
551 movq (%rdi), %rdi 551 movq (%rdi), %rdi
552 jmp 2f
5521: 5531:
553 554 FENCE_SWAPGS_KERNEL_ENTRY
5552:
554 PUSH_AND_CLEAR_REGS save_ret=1 556 PUSH_AND_CLEAR_REGS save_ret=1
555 ENCODE_FRAME_POINTER 8 557 ENCODE_FRAME_POINTER 8
556 558
@@ -1238,6 +1240,13 @@ ENTRY(paranoid_entry)
1238 */ 1240 */
1239 SAVE_AND_SWITCH_TO_KERNEL_CR3 scratch_reg=%rax save_reg=%r14 1241 SAVE_AND_SWITCH_TO_KERNEL_CR3 scratch_reg=%rax save_reg=%r14
1240 1242
1243 /*
1244 * The above SAVE_AND_SWITCH_TO_KERNEL_CR3 macro doesn't do an
1245 * unconditional CR3 write, even in the PTI case. So do an lfence
1246 * to prevent GS speculation, regardless of whether PTI is enabled.
1247 */
1248 FENCE_SWAPGS_KERNEL_ENTRY
1249
1241 ret 1250 ret
1242END(paranoid_entry) 1251END(paranoid_entry)
1243 1252
@@ -1288,6 +1297,7 @@ ENTRY(error_entry)
1288 * from user mode due to an IRET fault. 1297 * from user mode due to an IRET fault.
1289 */ 1298 */
1290 SWAPGS 1299 SWAPGS
1300 FENCE_SWAPGS_USER_ENTRY
1291 /* We have user CR3. Change to kernel CR3. */ 1301 /* We have user CR3. Change to kernel CR3. */
1292 SWITCH_TO_KERNEL_CR3 scratch_reg=%rax 1302 SWITCH_TO_KERNEL_CR3 scratch_reg=%rax
1293 1303
@@ -1301,6 +1311,8 @@ ENTRY(error_entry)
1301 pushq %r12 1311 pushq %r12
1302 ret 1312 ret
1303 1313
1314.Lerror_entry_done_lfence:
1315 FENCE_SWAPGS_KERNEL_ENTRY
1304.Lerror_entry_done: 1316.Lerror_entry_done:
1305 ret 1317 ret
1306 1318
@@ -1318,7 +1330,7 @@ ENTRY(error_entry)
1318 cmpq %rax, RIP+8(%rsp) 1330 cmpq %rax, RIP+8(%rsp)
1319 je .Lbstep_iret 1331 je .Lbstep_iret
1320 cmpq $.Lgs_change, RIP+8(%rsp) 1332 cmpq $.Lgs_change, RIP+8(%rsp)
1321 jne .Lerror_entry_done 1333 jne .Lerror_entry_done_lfence
1322 1334
1323 /* 1335 /*
1324 * hack: .Lgs_change can fail with user gsbase. If this happens, fix up 1336 * hack: .Lgs_change can fail with user gsbase. If this happens, fix up
@@ -1326,6 +1338,7 @@ ENTRY(error_entry)
1326 * .Lgs_change's error handler with kernel gsbase. 1338 * .Lgs_change's error handler with kernel gsbase.
1327 */ 1339 */
1328 SWAPGS 1340 SWAPGS
1341 FENCE_SWAPGS_USER_ENTRY
1329 SWITCH_TO_KERNEL_CR3 scratch_reg=%rax 1342 SWITCH_TO_KERNEL_CR3 scratch_reg=%rax
1330 jmp .Lerror_entry_done 1343 jmp .Lerror_entry_done
1331 1344
@@ -1340,6 +1353,7 @@ ENTRY(error_entry)
1340 * gsbase and CR3. Switch to kernel gsbase and CR3: 1353 * gsbase and CR3. Switch to kernel gsbase and CR3:
1341 */ 1354 */
1342 SWAPGS 1355 SWAPGS
1356 FENCE_SWAPGS_USER_ENTRY
1343 SWITCH_TO_KERNEL_CR3 scratch_reg=%rax 1357 SWITCH_TO_KERNEL_CR3 scratch_reg=%rax
1344 1358
1345 /* 1359 /*
@@ -1431,6 +1445,7 @@ ENTRY(nmi)
1431 1445
1432 swapgs 1446 swapgs
1433 cld 1447 cld
1448 FENCE_SWAPGS_USER_ENTRY
1434 SWITCH_TO_KERNEL_CR3 scratch_reg=%rdx 1449 SWITCH_TO_KERNEL_CR3 scratch_reg=%rdx
1435 movq %rsp, %rdx 1450 movq %rsp, %rdx
1436 movq PER_CPU_VAR(cpu_current_top_of_stack), %rsp 1451 movq PER_CPU_VAR(cpu_current_top_of_stack), %rsp
diff --git a/arch/x86/events/amd/ibs.c b/arch/x86/events/amd/ibs.c
index 62f317c9113a..5b35b7ea5d72 100644
--- a/arch/x86/events/amd/ibs.c
+++ b/arch/x86/events/amd/ibs.c
@@ -661,10 +661,17 @@ fail:
661 661
662 throttle = perf_event_overflow(event, &data, &regs); 662 throttle = perf_event_overflow(event, &data, &regs);
663out: 663out:
664 if (throttle) 664 if (throttle) {
665 perf_ibs_stop(event, 0); 665 perf_ibs_stop(event, 0);
666 else 666 } else {
667 perf_ibs_enable_event(perf_ibs, hwc, period >> 4); 667 period >>= 4;
668
669 if ((ibs_caps & IBS_CAPS_RDWROPCNT) &&
670 (*config & IBS_OP_CNT_CTL))
671 period |= *config & IBS_OP_CUR_CNT_RAND;
672
673 perf_ibs_enable_event(perf_ibs, hwc, period);
674 }
668 675
669 perf_event_update_userpage(event); 676 perf_event_update_userpage(event);
670 677
diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c
index cfe256ca76df..4886fc66fd88 100644
--- a/arch/x86/events/core.c
+++ b/arch/x86/events/core.c
@@ -1236,7 +1236,7 @@ void x86_pmu_enable_event(struct perf_event *event)
1236 * Add a single event to the PMU. 1236 * Add a single event to the PMU.
1237 * 1237 *
1238 * The event is added to the group of enabled events 1238 * The event is added to the group of enabled events
1239 * but only if it can be scehduled with existing events. 1239 * but only if it can be scheduled with existing events.
1240 */ 1240 */
1241static int x86_pmu_add(struct perf_event *event, int flags) 1241static int x86_pmu_add(struct perf_event *event, int flags)
1242{ 1242{
diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c
index 9e911a96972b..e4c2cb65ea50 100644
--- a/arch/x86/events/intel/core.c
+++ b/arch/x86/events/intel/core.c
@@ -20,7 +20,6 @@
20#include <asm/intel-family.h> 20#include <asm/intel-family.h>
21#include <asm/apic.h> 21#include <asm/apic.h>
22#include <asm/cpu_device_id.h> 22#include <asm/cpu_device_id.h>
23#include <asm/hypervisor.h>
24 23
25#include "../perf_event.h" 24#include "../perf_event.h"
26 25
@@ -263,8 +262,8 @@ static struct event_constraint intel_icl_event_constraints[] = {
263}; 262};
264 263
265static struct extra_reg intel_icl_extra_regs[] __read_mostly = { 264static struct extra_reg intel_icl_extra_regs[] __read_mostly = {
266 INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0x3fffff9fffull, RSP_0), 265 INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0x3fffffbfffull, RSP_0),
267 INTEL_UEVENT_EXTRA_REG(0x01bb, MSR_OFFCORE_RSP_1, 0x3fffff9fffull, RSP_1), 266 INTEL_UEVENT_EXTRA_REG(0x01bb, MSR_OFFCORE_RSP_1, 0x3fffffbfffull, RSP_1),
268 INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x01cd), 267 INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x01cd),
269 INTEL_UEVENT_EXTRA_REG(0x01c6, MSR_PEBS_FRONTEND, 0x7fff17, FE), 268 INTEL_UEVENT_EXTRA_REG(0x01c6, MSR_PEBS_FRONTEND, 0x7fff17, FE),
270 EVENT_EXTRA_END 269 EVENT_EXTRA_END
@@ -3573,6 +3572,11 @@ static u64 bdw_limit_period(struct perf_event *event, u64 left)
3573 return left; 3572 return left;
3574} 3573}
3575 3574
3575static u64 nhm_limit_period(struct perf_event *event, u64 left)
3576{
3577 return max(left, 32ULL);
3578}
3579
3576PMU_FORMAT_ATTR(event, "config:0-7" ); 3580PMU_FORMAT_ATTR(event, "config:0-7" );
3577PMU_FORMAT_ATTR(umask, "config:8-15" ); 3581PMU_FORMAT_ATTR(umask, "config:8-15" );
3578PMU_FORMAT_ATTR(edge, "config:18" ); 3582PMU_FORMAT_ATTR(edge, "config:18" );
@@ -4053,7 +4057,7 @@ static bool check_msr(unsigned long msr, u64 mask)
4053 * Disable the check for real HW, so we don't 4057 * Disable the check for real HW, so we don't
4054 * mess with potentionaly enabled registers: 4058 * mess with potentionaly enabled registers:
4055 */ 4059 */
4056 if (hypervisor_is_type(X86_HYPER_NATIVE)) 4060 if (!boot_cpu_has(X86_FEATURE_HYPERVISOR))
4057 return true; 4061 return true;
4058 4062
4059 /* 4063 /*
@@ -4607,6 +4611,7 @@ __init int intel_pmu_init(void)
4607 x86_pmu.pebs_constraints = intel_nehalem_pebs_event_constraints; 4611 x86_pmu.pebs_constraints = intel_nehalem_pebs_event_constraints;
4608 x86_pmu.enable_all = intel_pmu_nhm_enable_all; 4612 x86_pmu.enable_all = intel_pmu_nhm_enable_all;
4609 x86_pmu.extra_regs = intel_nehalem_extra_regs; 4613 x86_pmu.extra_regs = intel_nehalem_extra_regs;
4614 x86_pmu.limit_period = nhm_limit_period;
4610 4615
4611 mem_attr = nhm_mem_events_attrs; 4616 mem_attr = nhm_mem_events_attrs;
4612 4617
@@ -4955,6 +4960,7 @@ __init int intel_pmu_init(void)
4955 4960
4956 case INTEL_FAM6_SKYLAKE_X: 4961 case INTEL_FAM6_SKYLAKE_X:
4957 pmem = true; 4962 pmem = true;
4963 /* fall through */
4958 case INTEL_FAM6_SKYLAKE_MOBILE: 4964 case INTEL_FAM6_SKYLAKE_MOBILE:
4959 case INTEL_FAM6_SKYLAKE_DESKTOP: 4965 case INTEL_FAM6_SKYLAKE_DESKTOP:
4960 case INTEL_FAM6_KABYLAKE_MOBILE: 4966 case INTEL_FAM6_KABYLAKE_MOBILE:
@@ -5004,6 +5010,7 @@ __init int intel_pmu_init(void)
5004 case INTEL_FAM6_ICELAKE_X: 5010 case INTEL_FAM6_ICELAKE_X:
5005 case INTEL_FAM6_ICELAKE_XEON_D: 5011 case INTEL_FAM6_ICELAKE_XEON_D:
5006 pmem = true; 5012 pmem = true;
5013 /* fall through */
5007 case INTEL_FAM6_ICELAKE_MOBILE: 5014 case INTEL_FAM6_ICELAKE_MOBILE:
5008 case INTEL_FAM6_ICELAKE_DESKTOP: 5015 case INTEL_FAM6_ICELAKE_DESKTOP:
5009 x86_pmu.late_ack = true; 5016 x86_pmu.late_ack = true;
diff --git a/arch/x86/events/intel/ds.c b/arch/x86/events/intel/ds.c
index 2c8db2c19328..f1269e804e9b 100644
--- a/arch/x86/events/intel/ds.c
+++ b/arch/x86/events/intel/ds.c
@@ -851,7 +851,7 @@ struct event_constraint intel_skl_pebs_event_constraints[] = {
851 851
852struct event_constraint intel_icl_pebs_event_constraints[] = { 852struct event_constraint intel_icl_pebs_event_constraints[] = {
853 INTEL_FLAGS_UEVENT_CONSTRAINT(0x1c0, 0x100000000ULL), /* INST_RETIRED.PREC_DIST */ 853 INTEL_FLAGS_UEVENT_CONSTRAINT(0x1c0, 0x100000000ULL), /* INST_RETIRED.PREC_DIST */
854 INTEL_FLAGS_UEVENT_CONSTRAINT(0x0400, 0x400000000ULL), /* SLOTS */ 854 INTEL_FLAGS_UEVENT_CONSTRAINT(0x0400, 0x800000000ULL), /* SLOTS */
855 855
856 INTEL_PLD_CONSTRAINT(0x1cd, 0xff), /* MEM_TRANS_RETIRED.LOAD_LATENCY */ 856 INTEL_PLD_CONSTRAINT(0x1cd, 0xff), /* MEM_TRANS_RETIRED.LOAD_LATENCY */
857 INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_LD(0x1d0, 0xf), /* MEM_INST_RETIRED.LOAD */ 857 INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_LD(0x1d0, 0xf), /* MEM_INST_RETIRED.LOAD */
diff --git a/arch/x86/include/asm/bootparam_utils.h b/arch/x86/include/asm/bootparam_utils.h
index 101eb944f13c..9e5f3c722c33 100644
--- a/arch/x86/include/asm/bootparam_utils.h
+++ b/arch/x86/include/asm/bootparam_utils.h
@@ -18,6 +18,20 @@
18 * Note: efi_info is commonly left uninitialized, but that field has a 18 * Note: efi_info is commonly left uninitialized, but that field has a
19 * private magic, so it is better to leave it unchanged. 19 * private magic, so it is better to leave it unchanged.
20 */ 20 */
21
22#define sizeof_mbr(type, member) ({ sizeof(((type *)0)->member); })
23
24#define BOOT_PARAM_PRESERVE(struct_member) \
25 { \
26 .start = offsetof(struct boot_params, struct_member), \
27 .len = sizeof_mbr(struct boot_params, struct_member), \
28 }
29
30struct boot_params_to_save {
31 unsigned int start;
32 unsigned int len;
33};
34
21static void sanitize_boot_params(struct boot_params *boot_params) 35static void sanitize_boot_params(struct boot_params *boot_params)
22{ 36{
23 /* 37 /*
@@ -35,21 +49,40 @@ static void sanitize_boot_params(struct boot_params *boot_params)
35 * problems again. 49 * problems again.
36 */ 50 */
37 if (boot_params->sentinel) { 51 if (boot_params->sentinel) {
38 /* fields in boot_params are left uninitialized, clear them */ 52 static struct boot_params scratch;
39 boot_params->acpi_rsdp_addr = 0; 53 char *bp_base = (char *)boot_params;
40 memset(&boot_params->ext_ramdisk_image, 0, 54 char *save_base = (char *)&scratch;
41 (char *)&boot_params->efi_info - 55 int i;
42 (char *)&boot_params->ext_ramdisk_image); 56
43 memset(&boot_params->kbd_status, 0, 57 const struct boot_params_to_save to_save[] = {
44 (char *)&boot_params->hdr - 58 BOOT_PARAM_PRESERVE(screen_info),
45 (char *)&boot_params->kbd_status); 59 BOOT_PARAM_PRESERVE(apm_bios_info),
46 memset(&boot_params->_pad7[0], 0, 60 BOOT_PARAM_PRESERVE(tboot_addr),
47 (char *)&boot_params->edd_mbr_sig_buffer[0] - 61 BOOT_PARAM_PRESERVE(ist_info),
48 (char *)&boot_params->_pad7[0]); 62 BOOT_PARAM_PRESERVE(hd0_info),
49 memset(&boot_params->_pad8[0], 0, 63 BOOT_PARAM_PRESERVE(hd1_info),
50 (char *)&boot_params->eddbuf[0] - 64 BOOT_PARAM_PRESERVE(sys_desc_table),
51 (char *)&boot_params->_pad8[0]); 65 BOOT_PARAM_PRESERVE(olpc_ofw_header),
52 memset(&boot_params->_pad9[0], 0, sizeof(boot_params->_pad9)); 66 BOOT_PARAM_PRESERVE(efi_info),
67 BOOT_PARAM_PRESERVE(alt_mem_k),
68 BOOT_PARAM_PRESERVE(scratch),
69 BOOT_PARAM_PRESERVE(e820_entries),
70 BOOT_PARAM_PRESERVE(eddbuf_entries),
71 BOOT_PARAM_PRESERVE(edd_mbr_sig_buf_entries),
72 BOOT_PARAM_PRESERVE(edd_mbr_sig_buffer),
73 BOOT_PARAM_PRESERVE(hdr),
74 BOOT_PARAM_PRESERVE(e820_table),
75 BOOT_PARAM_PRESERVE(eddbuf),
76 };
77
78 memset(&scratch, 0, sizeof(scratch));
79
80 for (i = 0; i < ARRAY_SIZE(to_save); i++) {
81 memcpy(save_base + to_save[i].start,
82 bp_base + to_save[i].start, to_save[i].len);
83 }
84
85 memcpy(boot_params, save_base, sizeof(*boot_params));
53 } 86 }
54} 87}
55 88
diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h
index 998c2cc08363..e880f2408e29 100644
--- a/arch/x86/include/asm/cpufeatures.h
+++ b/arch/x86/include/asm/cpufeatures.h
@@ -281,6 +281,8 @@
281#define X86_FEATURE_CQM_OCCUP_LLC (11*32+ 1) /* LLC occupancy monitoring */ 281#define X86_FEATURE_CQM_OCCUP_LLC (11*32+ 1) /* LLC occupancy monitoring */
282#define X86_FEATURE_CQM_MBM_TOTAL (11*32+ 2) /* LLC Total MBM monitoring */ 282#define X86_FEATURE_CQM_MBM_TOTAL (11*32+ 2) /* LLC Total MBM monitoring */
283#define X86_FEATURE_CQM_MBM_LOCAL (11*32+ 3) /* LLC Local MBM monitoring */ 283#define X86_FEATURE_CQM_MBM_LOCAL (11*32+ 3) /* LLC Local MBM monitoring */
284#define X86_FEATURE_FENCE_SWAPGS_USER (11*32+ 4) /* "" LFENCE in user entry SWAPGS path */
285#define X86_FEATURE_FENCE_SWAPGS_KERNEL (11*32+ 5) /* "" LFENCE in kernel entry SWAPGS path */
284 286
285/* Intel-defined CPU features, CPUID level 0x00000007:1 (EAX), word 12 */ 287/* Intel-defined CPU features, CPUID level 0x00000007:1 (EAX), word 12 */
286#define X86_FEATURE_AVX512_BF16 (12*32+ 5) /* AVX512 BFLOAT16 instructions */ 288#define X86_FEATURE_AVX512_BF16 (12*32+ 5) /* AVX512 BFLOAT16 instructions */
@@ -394,5 +396,6 @@
394#define X86_BUG_L1TF X86_BUG(18) /* CPU is affected by L1 Terminal Fault */ 396#define X86_BUG_L1TF X86_BUG(18) /* CPU is affected by L1 Terminal Fault */
395#define X86_BUG_MDS X86_BUG(19) /* CPU is affected by Microarchitectural data sampling */ 397#define X86_BUG_MDS X86_BUG(19) /* CPU is affected by Microarchitectural data sampling */
396#define X86_BUG_MSBDS_ONLY X86_BUG(20) /* CPU is only affected by the MSDBS variant of BUG_MDS */ 398#define X86_BUG_MSBDS_ONLY X86_BUG(20) /* CPU is only affected by the MSDBS variant of BUG_MDS */
399#define X86_BUG_SWAPGS X86_BUG(21) /* CPU is affected by speculation through SWAPGS */
397 400
398#endif /* _ASM_X86_CPUFEATURES_H */ 401#endif /* _ASM_X86_CPUFEATURES_H */
diff --git a/arch/x86/include/asm/ftrace.h b/arch/x86/include/asm/ftrace.h
index 287f1f7b2e52..c38a66661576 100644
--- a/arch/x86/include/asm/ftrace.h
+++ b/arch/x86/include/asm/ftrace.h
@@ -16,7 +16,6 @@
16#define HAVE_FUNCTION_GRAPH_RET_ADDR_PTR 16#define HAVE_FUNCTION_GRAPH_RET_ADDR_PTR
17 17
18#ifndef __ASSEMBLY__ 18#ifndef __ASSEMBLY__
19extern void mcount(void);
20extern atomic_t modifying_ftrace_code; 19extern atomic_t modifying_ftrace_code;
21extern void __fentry__(void); 20extern void __fentry__(void);
22 21
diff --git a/arch/x86/include/asm/intel-family.h b/arch/x86/include/asm/intel-family.h
index 0278aa66ef62..fe7c205233f1 100644
--- a/arch/x86/include/asm/intel-family.h
+++ b/arch/x86/include/asm/intel-family.h
@@ -11,6 +11,21 @@
11 * While adding a new CPUID for a new microarchitecture, add a new 11 * While adding a new CPUID for a new microarchitecture, add a new
12 * group to keep logically sorted out in chronological order. Within 12 * group to keep logically sorted out in chronological order. Within
13 * that group keep the CPUID for the variants sorted by model number. 13 * that group keep the CPUID for the variants sorted by model number.
14 *
15 * The defined symbol names have the following form:
16 * INTEL_FAM6{OPTFAMILY}_{MICROARCH}{OPTDIFF}
17 * where:
18 * OPTFAMILY Describes the family of CPUs that this belongs to. Default
19 * is assumed to be "_CORE" (and should be omitted). Other values
20 * currently in use are _ATOM and _XEON_PHI
21 * MICROARCH Is the code name for the micro-architecture for this core.
22 * N.B. Not the platform name.
23 * OPTDIFF If needed, a short string to differentiate by market segment.
24 * Exact strings here will vary over time. _DESKTOP, _MOBILE, and
25 * _X (short for Xeon server) should be used when they are
26 * appropriate.
27 *
28 * The #define line may optionally include a comment including platform names.
14 */ 29 */
15 30
16#define INTEL_FAM6_CORE_YONAH 0x0E 31#define INTEL_FAM6_CORE_YONAH 0x0E
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 8282b8d41209..74e88e5edd9c 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -35,6 +35,8 @@
35#include <asm/kvm_vcpu_regs.h> 35#include <asm/kvm_vcpu_regs.h>
36#include <asm/hyperv-tlfs.h> 36#include <asm/hyperv-tlfs.h>
37 37
38#define __KVM_HAVE_ARCH_VCPU_DEBUGFS
39
38#define KVM_MAX_VCPUS 288 40#define KVM_MAX_VCPUS 288
39#define KVM_SOFT_MAX_VCPUS 240 41#define KVM_SOFT_MAX_VCPUS 240
40#define KVM_MAX_VCPU_ID 1023 42#define KVM_MAX_VCPU_ID 1023
@@ -607,15 +609,16 @@ struct kvm_vcpu_arch {
607 609
608 /* 610 /*
609 * QEMU userspace and the guest each have their own FPU state. 611 * QEMU userspace and the guest each have their own FPU state.
610 * In vcpu_run, we switch between the user, maintained in the 612 * In vcpu_run, we switch between the user and guest FPU contexts.
611 * task_struct struct, and guest FPU contexts. While running a VCPU, 613 * While running a VCPU, the VCPU thread will have the guest FPU
612 * the VCPU thread will have the guest FPU context. 614 * context.
613 * 615 *
614 * Note that while the PKRU state lives inside the fpu registers, 616 * Note that while the PKRU state lives inside the fpu registers,
615 * it is switched out separately at VMENTER and VMEXIT time. The 617 * it is switched out separately at VMENTER and VMEXIT time. The
616 * "guest_fpu" state here contains the guest FPU context, with the 618 * "guest_fpu" state here contains the guest FPU context, with the
617 * host PRKU bits. 619 * host PRKU bits.
618 */ 620 */
621 struct fpu *user_fpu;
619 struct fpu *guest_fpu; 622 struct fpu *guest_fpu;
620 623
621 u64 xcr0; 624 u64 xcr0;
@@ -1174,6 +1177,7 @@ struct kvm_x86_ops {
1174 int (*update_pi_irte)(struct kvm *kvm, unsigned int host_irq, 1177 int (*update_pi_irte)(struct kvm *kvm, unsigned int host_irq,
1175 uint32_t guest_irq, bool set); 1178 uint32_t guest_irq, bool set);
1176 void (*apicv_post_state_restore)(struct kvm_vcpu *vcpu); 1179 void (*apicv_post_state_restore)(struct kvm_vcpu *vcpu);
1180 bool (*dy_apicv_has_pending_interrupt)(struct kvm_vcpu *vcpu);
1177 1181
1178 int (*set_hv_timer)(struct kvm_vcpu *vcpu, u64 guest_deadline_tsc, 1182 int (*set_hv_timer)(struct kvm_vcpu *vcpu, u64 guest_deadline_tsc,
1179 bool *expired); 1183 bool *expired);
diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h
index 6b4fc2788078..271d837d69a8 100644
--- a/arch/x86/include/asm/msr-index.h
+++ b/arch/x86/include/asm/msr-index.h
@@ -381,6 +381,7 @@
381#define MSR_AMD64_PATCH_LEVEL 0x0000008b 381#define MSR_AMD64_PATCH_LEVEL 0x0000008b
382#define MSR_AMD64_TSC_RATIO 0xc0000104 382#define MSR_AMD64_TSC_RATIO 0xc0000104
383#define MSR_AMD64_NB_CFG 0xc001001f 383#define MSR_AMD64_NB_CFG 0xc001001f
384#define MSR_AMD64_CPUID_FN_1 0xc0011004
384#define MSR_AMD64_PATCH_LOADER 0xc0010020 385#define MSR_AMD64_PATCH_LOADER 0xc0010020
385#define MSR_AMD64_OSVW_ID_LENGTH 0xc0010140 386#define MSR_AMD64_OSVW_ID_LENGTH 0xc0010140
386#define MSR_AMD64_OSVW_STATUS 0xc0010141 387#define MSR_AMD64_OSVW_STATUS 0xc0010141
diff --git a/arch/x86/include/asm/nospec-branch.h b/arch/x86/include/asm/nospec-branch.h
index 109f974f9835..80bc209c0708 100644
--- a/arch/x86/include/asm/nospec-branch.h
+++ b/arch/x86/include/asm/nospec-branch.h
@@ -192,7 +192,7 @@
192 " lfence;\n" \ 192 " lfence;\n" \
193 " jmp 902b;\n" \ 193 " jmp 902b;\n" \
194 " .align 16\n" \ 194 " .align 16\n" \
195 "903: addl $4, %%esp;\n" \ 195 "903: lea 4(%%esp), %%esp;\n" \
196 " pushl %[thunk_target];\n" \ 196 " pushl %[thunk_target];\n" \
197 " ret;\n" \ 197 " ret;\n" \
198 " .align 16\n" \ 198 " .align 16\n" \
diff --git a/arch/x86/include/asm/perf_event.h b/arch/x86/include/asm/perf_event.h
index 1392d5e6e8d6..ee26e9215f18 100644
--- a/arch/x86/include/asm/perf_event.h
+++ b/arch/x86/include/asm/perf_event.h
@@ -252,16 +252,20 @@ struct pebs_lbr {
252#define IBSCTL_LVT_OFFSET_VALID (1ULL<<8) 252#define IBSCTL_LVT_OFFSET_VALID (1ULL<<8)
253#define IBSCTL_LVT_OFFSET_MASK 0x0F 253#define IBSCTL_LVT_OFFSET_MASK 0x0F
254 254
255/* ibs fetch bits/masks */ 255/* IBS fetch bits/masks */
256#define IBS_FETCH_RAND_EN (1ULL<<57) 256#define IBS_FETCH_RAND_EN (1ULL<<57)
257#define IBS_FETCH_VAL (1ULL<<49) 257#define IBS_FETCH_VAL (1ULL<<49)
258#define IBS_FETCH_ENABLE (1ULL<<48) 258#define IBS_FETCH_ENABLE (1ULL<<48)
259#define IBS_FETCH_CNT 0xFFFF0000ULL 259#define IBS_FETCH_CNT 0xFFFF0000ULL
260#define IBS_FETCH_MAX_CNT 0x0000FFFFULL 260#define IBS_FETCH_MAX_CNT 0x0000FFFFULL
261 261
262/* ibs op bits/masks */ 262/*
263/* lower 4 bits of the current count are ignored: */ 263 * IBS op bits/masks
264#define IBS_OP_CUR_CNT (0xFFFF0ULL<<32) 264 * The lower 7 bits of the current count are random bits
265 * preloaded by hardware and ignored in software
266 */
267#define IBS_OP_CUR_CNT (0xFFF80ULL<<32)
268#define IBS_OP_CUR_CNT_RAND (0x0007FULL<<32)
265#define IBS_OP_CNT_CTL (1ULL<<19) 269#define IBS_OP_CNT_CTL (1ULL<<19)
266#define IBS_OP_VAL (1ULL<<18) 270#define IBS_OP_VAL (1ULL<<18)
267#define IBS_OP_ENABLE (1ULL<<17) 271#define IBS_OP_ENABLE (1ULL<<17)
diff --git a/arch/x86/include/asm/vdso/gettimeofday.h b/arch/x86/include/asm/vdso/gettimeofday.h
index ae91429129a6..ba71a63cdac4 100644
--- a/arch/x86/include/asm/vdso/gettimeofday.h
+++ b/arch/x86/include/asm/vdso/gettimeofday.h
@@ -96,6 +96,8 @@ long clock_getres_fallback(clockid_t _clkid, struct __kernel_timespec *_ts)
96 96
97#else 97#else
98 98
99#define VDSO_HAS_32BIT_FALLBACK 1
100
99static __always_inline 101static __always_inline
100long clock_gettime_fallback(clockid_t _clkid, struct __kernel_timespec *_ts) 102long clock_gettime_fallback(clockid_t _clkid, struct __kernel_timespec *_ts)
101{ 103{
@@ -114,6 +116,23 @@ long clock_gettime_fallback(clockid_t _clkid, struct __kernel_timespec *_ts)
114} 116}
115 117
116static __always_inline 118static __always_inline
119long clock_gettime32_fallback(clockid_t _clkid, struct old_timespec32 *_ts)
120{
121 long ret;
122
123 asm (
124 "mov %%ebx, %%edx \n"
125 "mov %[clock], %%ebx \n"
126 "call __kernel_vsyscall \n"
127 "mov %%edx, %%ebx \n"
128 : "=a" (ret), "=m" (*_ts)
129 : "0" (__NR_clock_gettime), [clock] "g" (_clkid), "c" (_ts)
130 : "edx");
131
132 return ret;
133}
134
135static __always_inline
117long gettimeofday_fallback(struct __kernel_old_timeval *_tv, 136long gettimeofday_fallback(struct __kernel_old_timeval *_tv,
118 struct timezone *_tz) 137 struct timezone *_tz)
119{ 138{
@@ -148,6 +167,23 @@ clock_getres_fallback(clockid_t _clkid, struct __kernel_timespec *_ts)
148 return ret; 167 return ret;
149} 168}
150 169
170static __always_inline
171long clock_getres32_fallback(clockid_t _clkid, struct old_timespec32 *_ts)
172{
173 long ret;
174
175 asm (
176 "mov %%ebx, %%edx \n"
177 "mov %[clock], %%ebx \n"
178 "call __kernel_vsyscall \n"
179 "mov %%edx, %%ebx \n"
180 : "=a" (ret), "=m" (*_ts)
181 : "0" (__NR_clock_getres), [clock] "g" (_clkid), "c" (_ts)
182 : "edx");
183
184 return ret;
185}
186
151#endif 187#endif
152 188
153#ifdef CONFIG_PARAVIRT_CLOCK 189#ifdef CONFIG_PARAVIRT_CLOCK
diff --git a/arch/x86/include/uapi/asm/byteorder.h b/arch/x86/include/uapi/asm/byteorder.h
index 484e3cfd7ef2..149143cab9ff 100644
--- a/arch/x86/include/uapi/asm/byteorder.h
+++ b/arch/x86/include/uapi/asm/byteorder.h
@@ -1,4 +1,4 @@
1/* SPDX-License-Identifier: GPL-2.0 */ 1/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
2#ifndef _ASM_X86_BYTEORDER_H 2#ifndef _ASM_X86_BYTEORDER_H
3#define _ASM_X86_BYTEORDER_H 3#define _ASM_X86_BYTEORDER_H
4 4
diff --git a/arch/x86/include/uapi/asm/hwcap2.h b/arch/x86/include/uapi/asm/hwcap2.h
index 6ebaae90e207..8b2effe6efb8 100644
--- a/arch/x86/include/uapi/asm/hwcap2.h
+++ b/arch/x86/include/uapi/asm/hwcap2.h
@@ -1,4 +1,4 @@
1/* SPDX-License-Identifier: GPL-2.0 */ 1/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
2#ifndef _ASM_X86_HWCAP2_H 2#ifndef _ASM_X86_HWCAP2_H
3#define _ASM_X86_HWCAP2_H 3#define _ASM_X86_HWCAP2_H
4 4
diff --git a/arch/x86/include/uapi/asm/sigcontext32.h b/arch/x86/include/uapi/asm/sigcontext32.h
index 6b18e88de8a6..7114801d0499 100644
--- a/arch/x86/include/uapi/asm/sigcontext32.h
+++ b/arch/x86/include/uapi/asm/sigcontext32.h
@@ -1,4 +1,4 @@
1/* SPDX-License-Identifier: GPL-2.0 */ 1/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
2#ifndef _ASM_X86_SIGCONTEXT32_H 2#ifndef _ASM_X86_SIGCONTEXT32_H
3#define _ASM_X86_SIGCONTEXT32_H 3#define _ASM_X86_SIGCONTEXT32_H
4 4
diff --git a/arch/x86/include/uapi/asm/types.h b/arch/x86/include/uapi/asm/types.h
index df55e1ddb0c9..9d5c11a24279 100644
--- a/arch/x86/include/uapi/asm/types.h
+++ b/arch/x86/include/uapi/asm/types.h
@@ -1,4 +1,4 @@
1/* SPDX-License-Identifier: GPL-2.0 */ 1/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
2#ifndef _ASM_X86_TYPES_H 2#ifndef _ASM_X86_TYPES_H
3#define _ASM_X86_TYPES_H 3#define _ASM_X86_TYPES_H
4 4
diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
index f5291362da1a..dba2828b779a 100644
--- a/arch/x86/kernel/apic/apic.c
+++ b/arch/x86/kernel/apic/apic.c
@@ -722,7 +722,7 @@ static __initdata unsigned long lapic_cal_pm1, lapic_cal_pm2;
722static __initdata unsigned long lapic_cal_j1, lapic_cal_j2; 722static __initdata unsigned long lapic_cal_j1, lapic_cal_j2;
723 723
724/* 724/*
725 * Temporary interrupt handler. 725 * Temporary interrupt handler and polled calibration function.
726 */ 726 */
727static void __init lapic_cal_handler(struct clock_event_device *dev) 727static void __init lapic_cal_handler(struct clock_event_device *dev)
728{ 728{
@@ -851,7 +851,8 @@ bool __init apic_needs_pit(void)
851static int __init calibrate_APIC_clock(void) 851static int __init calibrate_APIC_clock(void)
852{ 852{
853 struct clock_event_device *levt = this_cpu_ptr(&lapic_events); 853 struct clock_event_device *levt = this_cpu_ptr(&lapic_events);
854 void (*real_handler)(struct clock_event_device *dev); 854 u64 tsc_perj = 0, tsc_start = 0;
855 unsigned long jif_start;
855 unsigned long deltaj; 856 unsigned long deltaj;
856 long delta, deltatsc; 857 long delta, deltatsc;
857 int pm_referenced = 0; 858 int pm_referenced = 0;
@@ -878,28 +879,64 @@ static int __init calibrate_APIC_clock(void)
878 apic_printk(APIC_VERBOSE, "Using local APIC timer interrupts.\n" 879 apic_printk(APIC_VERBOSE, "Using local APIC timer interrupts.\n"
879 "calibrating APIC timer ...\n"); 880 "calibrating APIC timer ...\n");
880 881
882 /*
883 * There are platforms w/o global clockevent devices. Instead of
884 * making the calibration conditional on that, use a polling based
885 * approach everywhere.
886 */
881 local_irq_disable(); 887 local_irq_disable();
882 888
883 /* Replace the global interrupt handler */
884 real_handler = global_clock_event->event_handler;
885 global_clock_event->event_handler = lapic_cal_handler;
886
887 /* 889 /*
888 * Setup the APIC counter to maximum. There is no way the lapic 890 * Setup the APIC counter to maximum. There is no way the lapic
889 * can underflow in the 100ms detection time frame 891 * can underflow in the 100ms detection time frame
890 */ 892 */
891 __setup_APIC_LVTT(0xffffffff, 0, 0); 893 __setup_APIC_LVTT(0xffffffff, 0, 0);
892 894
893 /* Let the interrupts run */ 895 /*
896 * Methods to terminate the calibration loop:
897 * 1) Global clockevent if available (jiffies)
898 * 2) TSC if available and frequency is known
899 */
900 jif_start = READ_ONCE(jiffies);
901
902 if (tsc_khz) {
903 tsc_start = rdtsc();
904 tsc_perj = div_u64((u64)tsc_khz * 1000, HZ);
905 }
906
907 /*
908 * Enable interrupts so the tick can fire, if a global
909 * clockevent device is available
910 */
894 local_irq_enable(); 911 local_irq_enable();
895 912
896 while (lapic_cal_loops <= LAPIC_CAL_LOOPS) 913 while (lapic_cal_loops <= LAPIC_CAL_LOOPS) {
897 cpu_relax(); 914 /* Wait for a tick to elapse */
915 while (1) {
916 if (tsc_khz) {
917 u64 tsc_now = rdtsc();
918 if ((tsc_now - tsc_start) >= tsc_perj) {
919 tsc_start += tsc_perj;
920 break;
921 }
922 } else {
923 unsigned long jif_now = READ_ONCE(jiffies);
898 924
899 local_irq_disable(); 925 if (time_after(jif_now, jif_start)) {
926 jif_start = jif_now;
927 break;
928 }
929 }
930 cpu_relax();
931 }
900 932
901 /* Restore the real event handler */ 933 /* Invoke the calibration routine */
902 global_clock_event->event_handler = real_handler; 934 local_irq_disable();
935 lapic_cal_handler(NULL);
936 local_irq_enable();
937 }
938
939 local_irq_disable();
903 940
904 /* Build delta t1-t2 as apic timer counts down */ 941 /* Build delta t1-t2 as apic timer counts down */
905 delta = lapic_cal_t1 - lapic_cal_t2; 942 delta = lapic_cal_t1 - lapic_cal_t2;
@@ -943,10 +980,11 @@ static int __init calibrate_APIC_clock(void)
943 levt->features &= ~CLOCK_EVT_FEAT_DUMMY; 980 levt->features &= ~CLOCK_EVT_FEAT_DUMMY;
944 981
945 /* 982 /*
946 * PM timer calibration failed or not turned on 983 * PM timer calibration failed or not turned on so lets try APIC
947 * so lets try APIC timer based calibration 984 * timer based calibration, if a global clockevent device is
985 * available.
948 */ 986 */
949 if (!pm_referenced) { 987 if (!pm_referenced && global_clock_event) {
950 apic_printk(APIC_VERBOSE, "... verify APIC timer\n"); 988 apic_printk(APIC_VERBOSE, "... verify APIC timer\n");
951 989
952 /* 990 /*
@@ -1141,6 +1179,10 @@ void clear_local_APIC(void)
1141 apic_write(APIC_LVT0, v | APIC_LVT_MASKED); 1179 apic_write(APIC_LVT0, v | APIC_LVT_MASKED);
1142 v = apic_read(APIC_LVT1); 1180 v = apic_read(APIC_LVT1);
1143 apic_write(APIC_LVT1, v | APIC_LVT_MASKED); 1181 apic_write(APIC_LVT1, v | APIC_LVT_MASKED);
1182 if (!x2apic_enabled()) {
1183 v = apic_read(APIC_LDR) & ~APIC_LDR_MASK;
1184 apic_write(APIC_LDR, v);
1185 }
1144 if (maxlvt >= 4) { 1186 if (maxlvt >= 4) {
1145 v = apic_read(APIC_LVTPC); 1187 v = apic_read(APIC_LVTPC);
1146 apic_write(APIC_LVTPC, v | APIC_LVT_MASKED); 1188 apic_write(APIC_LVTPC, v | APIC_LVT_MASKED);
diff --git a/arch/x86/kernel/apic/bigsmp_32.c b/arch/x86/kernel/apic/bigsmp_32.c
index afee386ff711..caedd8d60d36 100644
--- a/arch/x86/kernel/apic/bigsmp_32.c
+++ b/arch/x86/kernel/apic/bigsmp_32.c
@@ -38,32 +38,12 @@ static int bigsmp_early_logical_apicid(int cpu)
38 return early_per_cpu(x86_cpu_to_apicid, cpu); 38 return early_per_cpu(x86_cpu_to_apicid, cpu);
39} 39}
40 40
41static inline unsigned long calculate_ldr(int cpu)
42{
43 unsigned long val, id;
44
45 val = apic_read(APIC_LDR) & ~APIC_LDR_MASK;
46 id = per_cpu(x86_bios_cpu_apicid, cpu);
47 val |= SET_APIC_LOGICAL_ID(id);
48
49 return val;
50}
51
52/* 41/*
53 * Set up the logical destination ID. 42 * bigsmp enables physical destination mode
54 * 43 * and doesn't use LDR and DFR
55 * Intel recommends to set DFR, LDR and TPR before enabling
56 * an APIC. See e.g. "AP-388 82489DX User's Manual" (Intel
57 * document number 292116). So here it goes...
58 */ 44 */
59static void bigsmp_init_apic_ldr(void) 45static void bigsmp_init_apic_ldr(void)
60{ 46{
61 unsigned long val;
62 int cpu = smp_processor_id();
63
64 apic_write(APIC_DFR, APIC_DFR_FLAT);
65 val = calculate_ldr(cpu);
66 apic_write(APIC_LDR, val);
67} 47}
68 48
69static void bigsmp_setup_apic_routing(void) 49static void bigsmp_setup_apic_routing(void)
diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
index c7bb6c69f21c..d6af97fd170a 100644
--- a/arch/x86/kernel/apic/io_apic.c
+++ b/arch/x86/kernel/apic/io_apic.c
@@ -2438,7 +2438,13 @@ unsigned int arch_dynirq_lower_bound(unsigned int from)
2438 * dmar_alloc_hwirq() may be called before setup_IO_APIC(), so use 2438 * dmar_alloc_hwirq() may be called before setup_IO_APIC(), so use
2439 * gsi_top if ioapic_dynirq_base hasn't been initialized yet. 2439 * gsi_top if ioapic_dynirq_base hasn't been initialized yet.
2440 */ 2440 */
2441 return ioapic_initialized ? ioapic_dynirq_base : gsi_top; 2441 if (!ioapic_initialized)
2442 return gsi_top;
2443 /*
2444 * For DT enabled machines ioapic_dynirq_base is irrelevant and not
2445 * updated. So simply return @from if ioapic_dynirq_base == 0.
2446 */
2447 return ioapic_dynirq_base ? : from;
2442} 2448}
2443 2449
2444#ifdef CONFIG_X86_32 2450#ifdef CONFIG_X86_32
diff --git a/arch/x86/kernel/apic/probe_32.c b/arch/x86/kernel/apic/probe_32.c
index 1492799b8f43..ee2d91e382f1 100644
--- a/arch/x86/kernel/apic/probe_32.c
+++ b/arch/x86/kernel/apic/probe_32.c
@@ -184,7 +184,8 @@ void __init default_setup_apic_routing(void)
184 def_to_bigsmp = 0; 184 def_to_bigsmp = 0;
185 break; 185 break;
186 } 186 }
187 /* If P4 and above fall through */ 187 /* P4 and above */
188 /* fall through */
188 case X86_VENDOR_HYGON: 189 case X86_VENDOR_HYGON:
189 case X86_VENDOR_AMD: 190 case X86_VENDOR_AMD:
190 def_to_bigsmp = 1; 191 def_to_bigsmp = 1;
diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
index 8d4e50428b68..68c363c341bf 100644
--- a/arch/x86/kernel/cpu/amd.c
+++ b/arch/x86/kernel/cpu/amd.c
@@ -804,6 +804,64 @@ static void init_amd_ln(struct cpuinfo_x86 *c)
804 msr_set_bit(MSR_AMD64_DE_CFG, 31); 804 msr_set_bit(MSR_AMD64_DE_CFG, 31);
805} 805}
806 806
807static bool rdrand_force;
808
809static int __init rdrand_cmdline(char *str)
810{
811 if (!str)
812 return -EINVAL;
813
814 if (!strcmp(str, "force"))
815 rdrand_force = true;
816 else
817 return -EINVAL;
818
819 return 0;
820}
821early_param("rdrand", rdrand_cmdline);
822
823static void clear_rdrand_cpuid_bit(struct cpuinfo_x86 *c)
824{
825 /*
826 * Saving of the MSR used to hide the RDRAND support during
827 * suspend/resume is done by arch/x86/power/cpu.c, which is
828 * dependent on CONFIG_PM_SLEEP.
829 */
830 if (!IS_ENABLED(CONFIG_PM_SLEEP))
831 return;
832
833 /*
834 * The nordrand option can clear X86_FEATURE_RDRAND, so check for
835 * RDRAND support using the CPUID function directly.
836 */
837 if (!(cpuid_ecx(1) & BIT(30)) || rdrand_force)
838 return;
839
840 msr_clear_bit(MSR_AMD64_CPUID_FN_1, 62);
841
842 /*
843 * Verify that the CPUID change has occurred in case the kernel is
844 * running virtualized and the hypervisor doesn't support the MSR.
845 */
846 if (cpuid_ecx(1) & BIT(30)) {
847 pr_info_once("BIOS may not properly restore RDRAND after suspend, but hypervisor does not support hiding RDRAND via CPUID.\n");
848 return;
849 }
850
851 clear_cpu_cap(c, X86_FEATURE_RDRAND);
852 pr_info_once("BIOS may not properly restore RDRAND after suspend, hiding RDRAND via CPUID. Use rdrand=force to reenable.\n");
853}
854
855static void init_amd_jg(struct cpuinfo_x86 *c)
856{
857 /*
858 * Some BIOS implementations do not restore proper RDRAND support
859 * across suspend and resume. Check on whether to hide the RDRAND
860 * instruction support via CPUID.
861 */
862 clear_rdrand_cpuid_bit(c);
863}
864
807static void init_amd_bd(struct cpuinfo_x86 *c) 865static void init_amd_bd(struct cpuinfo_x86 *c)
808{ 866{
809 u64 value; 867 u64 value;
@@ -818,6 +876,13 @@ static void init_amd_bd(struct cpuinfo_x86 *c)
818 wrmsrl_safe(MSR_F15H_IC_CFG, value); 876 wrmsrl_safe(MSR_F15H_IC_CFG, value);
819 } 877 }
820 } 878 }
879
880 /*
881 * Some BIOS implementations do not restore proper RDRAND support
882 * across suspend and resume. Check on whether to hide the RDRAND
883 * instruction support via CPUID.
884 */
885 clear_rdrand_cpuid_bit(c);
821} 886}
822 887
823static void init_amd_zn(struct cpuinfo_x86 *c) 888static void init_amd_zn(struct cpuinfo_x86 *c)
@@ -860,6 +925,7 @@ static void init_amd(struct cpuinfo_x86 *c)
860 case 0x10: init_amd_gh(c); break; 925 case 0x10: init_amd_gh(c); break;
861 case 0x12: init_amd_ln(c); break; 926 case 0x12: init_amd_ln(c); break;
862 case 0x15: init_amd_bd(c); break; 927 case 0x15: init_amd_bd(c); break;
928 case 0x16: init_amd_jg(c); break;
863 case 0x17: init_amd_zn(c); break; 929 case 0x17: init_amd_zn(c); break;
864 } 930 }
865 931
diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
index 66ca906aa790..c6fa3ef10b4e 100644
--- a/arch/x86/kernel/cpu/bugs.c
+++ b/arch/x86/kernel/cpu/bugs.c
@@ -34,6 +34,7 @@
34 34
35#include "cpu.h" 35#include "cpu.h"
36 36
37static void __init spectre_v1_select_mitigation(void);
37static void __init spectre_v2_select_mitigation(void); 38static void __init spectre_v2_select_mitigation(void);
38static void __init ssb_select_mitigation(void); 39static void __init ssb_select_mitigation(void);
39static void __init l1tf_select_mitigation(void); 40static void __init l1tf_select_mitigation(void);
@@ -98,17 +99,11 @@ void __init check_bugs(void)
98 if (boot_cpu_has(X86_FEATURE_STIBP)) 99 if (boot_cpu_has(X86_FEATURE_STIBP))
99 x86_spec_ctrl_mask |= SPEC_CTRL_STIBP; 100 x86_spec_ctrl_mask |= SPEC_CTRL_STIBP;
100 101
101 /* Select the proper spectre mitigation before patching alternatives */ 102 /* Select the proper CPU mitigations before patching alternatives: */
103 spectre_v1_select_mitigation();
102 spectre_v2_select_mitigation(); 104 spectre_v2_select_mitigation();
103
104 /*
105 * Select proper mitigation for any exposure to the Speculative Store
106 * Bypass vulnerability.
107 */
108 ssb_select_mitigation(); 105 ssb_select_mitigation();
109
110 l1tf_select_mitigation(); 106 l1tf_select_mitigation();
111
112 mds_select_mitigation(); 107 mds_select_mitigation();
113 108
114 arch_smt_update(); 109 arch_smt_update();
@@ -274,6 +269,98 @@ static int __init mds_cmdline(char *str)
274early_param("mds", mds_cmdline); 269early_param("mds", mds_cmdline);
275 270
276#undef pr_fmt 271#undef pr_fmt
272#define pr_fmt(fmt) "Spectre V1 : " fmt
273
274enum spectre_v1_mitigation {
275 SPECTRE_V1_MITIGATION_NONE,
276 SPECTRE_V1_MITIGATION_AUTO,
277};
278
279static enum spectre_v1_mitigation spectre_v1_mitigation __ro_after_init =
280 SPECTRE_V1_MITIGATION_AUTO;
281
282static const char * const spectre_v1_strings[] = {
283 [SPECTRE_V1_MITIGATION_NONE] = "Vulnerable: __user pointer sanitization and usercopy barriers only; no swapgs barriers",
284 [SPECTRE_V1_MITIGATION_AUTO] = "Mitigation: usercopy/swapgs barriers and __user pointer sanitization",
285};
286
287/*
288 * Does SMAP provide full mitigation against speculative kernel access to
289 * userspace?
290 */
291static bool smap_works_speculatively(void)
292{
293 if (!boot_cpu_has(X86_FEATURE_SMAP))
294 return false;
295
296 /*
297 * On CPUs which are vulnerable to Meltdown, SMAP does not
298 * prevent speculative access to user data in the L1 cache.
299 * Consider SMAP to be non-functional as a mitigation on these
300 * CPUs.
301 */
302 if (boot_cpu_has(X86_BUG_CPU_MELTDOWN))
303 return false;
304
305 return true;
306}
307
308static void __init spectre_v1_select_mitigation(void)
309{
310 if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V1) || cpu_mitigations_off()) {
311 spectre_v1_mitigation = SPECTRE_V1_MITIGATION_NONE;
312 return;
313 }
314
315 if (spectre_v1_mitigation == SPECTRE_V1_MITIGATION_AUTO) {
316 /*
317 * With Spectre v1, a user can speculatively control either
318 * path of a conditional swapgs with a user-controlled GS
319 * value. The mitigation is to add lfences to both code paths.
320 *
321 * If FSGSBASE is enabled, the user can put a kernel address in
322 * GS, in which case SMAP provides no protection.
323 *
324 * [ NOTE: Don't check for X86_FEATURE_FSGSBASE until the
325 * FSGSBASE enablement patches have been merged. ]
326 *
327 * If FSGSBASE is disabled, the user can only put a user space
328 * address in GS. That makes an attack harder, but still
329 * possible if there's no SMAP protection.
330 */
331 if (!smap_works_speculatively()) {
332 /*
333 * Mitigation can be provided from SWAPGS itself or
334 * PTI as the CR3 write in the Meltdown mitigation
335 * is serializing.
336 *
337 * If neither is there, mitigate with an LFENCE to
338 * stop speculation through swapgs.
339 */
340 if (boot_cpu_has_bug(X86_BUG_SWAPGS) &&
341 !boot_cpu_has(X86_FEATURE_PTI))
342 setup_force_cpu_cap(X86_FEATURE_FENCE_SWAPGS_USER);
343
344 /*
345 * Enable lfences in the kernel entry (non-swapgs)
346 * paths, to prevent user entry from speculatively
347 * skipping swapgs.
348 */
349 setup_force_cpu_cap(X86_FEATURE_FENCE_SWAPGS_KERNEL);
350 }
351 }
352
353 pr_info("%s\n", spectre_v1_strings[spectre_v1_mitigation]);
354}
355
356static int __init nospectre_v1_cmdline(char *str)
357{
358 spectre_v1_mitigation = SPECTRE_V1_MITIGATION_NONE;
359 return 0;
360}
361early_param("nospectre_v1", nospectre_v1_cmdline);
362
363#undef pr_fmt
277#define pr_fmt(fmt) "Spectre V2 : " fmt 364#define pr_fmt(fmt) "Spectre V2 : " fmt
278 365
279static enum spectre_v2_mitigation spectre_v2_enabled __ro_after_init = 366static enum spectre_v2_mitigation spectre_v2_enabled __ro_after_init =
@@ -1226,7 +1313,7 @@ static ssize_t l1tf_show_state(char *buf)
1226 1313
1227static ssize_t mds_show_state(char *buf) 1314static ssize_t mds_show_state(char *buf)
1228{ 1315{
1229 if (!hypervisor_is_type(X86_HYPER_NATIVE)) { 1316 if (boot_cpu_has(X86_FEATURE_HYPERVISOR)) {
1230 return sprintf(buf, "%s; SMT Host state unknown\n", 1317 return sprintf(buf, "%s; SMT Host state unknown\n",
1231 mds_strings[mds_mitigation]); 1318 mds_strings[mds_mitigation]);
1232 } 1319 }
@@ -1290,7 +1377,7 @@ static ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr
1290 break; 1377 break;
1291 1378
1292 case X86_BUG_SPECTRE_V1: 1379 case X86_BUG_SPECTRE_V1:
1293 return sprintf(buf, "Mitigation: __user pointer sanitization\n"); 1380 return sprintf(buf, "%s\n", spectre_v1_strings[spectre_v1_mitigation]);
1294 1381
1295 case X86_BUG_SPECTRE_V2: 1382 case X86_BUG_SPECTRE_V2:
1296 return sprintf(buf, "%s%s%s%s%s%s\n", spectre_v2_strings[spectre_v2_enabled], 1383 return sprintf(buf, "%s%s%s%s%s%s\n", spectre_v2_strings[spectre_v2_enabled],
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index 11472178e17f..f125bf7ecb6f 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -1022,6 +1022,7 @@ static void identify_cpu_without_cpuid(struct cpuinfo_x86 *c)
1022#define NO_L1TF BIT(3) 1022#define NO_L1TF BIT(3)
1023#define NO_MDS BIT(4) 1023#define NO_MDS BIT(4)
1024#define MSBDS_ONLY BIT(5) 1024#define MSBDS_ONLY BIT(5)
1025#define NO_SWAPGS BIT(6)
1025 1026
1026#define VULNWL(_vendor, _family, _model, _whitelist) \ 1027#define VULNWL(_vendor, _family, _model, _whitelist) \
1027 { X86_VENDOR_##_vendor, _family, _model, X86_FEATURE_ANY, _whitelist } 1028 { X86_VENDOR_##_vendor, _family, _model, X86_FEATURE_ANY, _whitelist }
@@ -1048,30 +1049,38 @@ static const __initconst struct x86_cpu_id cpu_vuln_whitelist[] = {
1048 VULNWL_INTEL(ATOM_BONNELL, NO_SPECULATION), 1049 VULNWL_INTEL(ATOM_BONNELL, NO_SPECULATION),
1049 VULNWL_INTEL(ATOM_BONNELL_MID, NO_SPECULATION), 1050 VULNWL_INTEL(ATOM_BONNELL_MID, NO_SPECULATION),
1050 1051
1051 VULNWL_INTEL(ATOM_SILVERMONT, NO_SSB | NO_L1TF | MSBDS_ONLY), 1052 VULNWL_INTEL(ATOM_SILVERMONT, NO_SSB | NO_L1TF | MSBDS_ONLY | NO_SWAPGS),
1052 VULNWL_INTEL(ATOM_SILVERMONT_X, NO_SSB | NO_L1TF | MSBDS_ONLY), 1053 VULNWL_INTEL(ATOM_SILVERMONT_X, NO_SSB | NO_L1TF | MSBDS_ONLY | NO_SWAPGS),
1053 VULNWL_INTEL(ATOM_SILVERMONT_MID, NO_SSB | NO_L1TF | MSBDS_ONLY), 1054 VULNWL_INTEL(ATOM_SILVERMONT_MID, NO_SSB | NO_L1TF | MSBDS_ONLY | NO_SWAPGS),
1054 VULNWL_INTEL(ATOM_AIRMONT, NO_SSB | NO_L1TF | MSBDS_ONLY), 1055 VULNWL_INTEL(ATOM_AIRMONT, NO_SSB | NO_L1TF | MSBDS_ONLY | NO_SWAPGS),
1055 VULNWL_INTEL(XEON_PHI_KNL, NO_SSB | NO_L1TF | MSBDS_ONLY), 1056 VULNWL_INTEL(XEON_PHI_KNL, NO_SSB | NO_L1TF | MSBDS_ONLY | NO_SWAPGS),
1056 VULNWL_INTEL(XEON_PHI_KNM, NO_SSB | NO_L1TF | MSBDS_ONLY), 1057 VULNWL_INTEL(XEON_PHI_KNM, NO_SSB | NO_L1TF | MSBDS_ONLY | NO_SWAPGS),
1057 1058
1058 VULNWL_INTEL(CORE_YONAH, NO_SSB), 1059 VULNWL_INTEL(CORE_YONAH, NO_SSB),
1059 1060
1060 VULNWL_INTEL(ATOM_AIRMONT_MID, NO_L1TF | MSBDS_ONLY), 1061 VULNWL_INTEL(ATOM_AIRMONT_MID, NO_L1TF | MSBDS_ONLY | NO_SWAPGS),
1061 1062
1062 VULNWL_INTEL(ATOM_GOLDMONT, NO_MDS | NO_L1TF), 1063 VULNWL_INTEL(ATOM_GOLDMONT, NO_MDS | NO_L1TF | NO_SWAPGS),
1063 VULNWL_INTEL(ATOM_GOLDMONT_X, NO_MDS | NO_L1TF), 1064 VULNWL_INTEL(ATOM_GOLDMONT_X, NO_MDS | NO_L1TF | NO_SWAPGS),
1064 VULNWL_INTEL(ATOM_GOLDMONT_PLUS, NO_MDS | NO_L1TF), 1065 VULNWL_INTEL(ATOM_GOLDMONT_PLUS, NO_MDS | NO_L1TF | NO_SWAPGS),
1066
1067 /*
1068 * Technically, swapgs isn't serializing on AMD (despite it previously
1069 * being documented as such in the APM). But according to AMD, %gs is
1070 * updated non-speculatively, and the issuing of %gs-relative memory
1071 * operands will be blocked until the %gs update completes, which is
1072 * good enough for our purposes.
1073 */
1065 1074
1066 /* AMD Family 0xf - 0x12 */ 1075 /* AMD Family 0xf - 0x12 */
1067 VULNWL_AMD(0x0f, NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS), 1076 VULNWL_AMD(0x0f, NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS),
1068 VULNWL_AMD(0x10, NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS), 1077 VULNWL_AMD(0x10, NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS),
1069 VULNWL_AMD(0x11, NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS), 1078 VULNWL_AMD(0x11, NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS),
1070 VULNWL_AMD(0x12, NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS), 1079 VULNWL_AMD(0x12, NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS),
1071 1080
1072 /* FAMILY_ANY must be last, otherwise 0x0f - 0x12 matches won't work */ 1081 /* FAMILY_ANY must be last, otherwise 0x0f - 0x12 matches won't work */
1073 VULNWL_AMD(X86_FAMILY_ANY, NO_MELTDOWN | NO_L1TF | NO_MDS), 1082 VULNWL_AMD(X86_FAMILY_ANY, NO_MELTDOWN | NO_L1TF | NO_MDS | NO_SWAPGS),
1074 VULNWL_HYGON(X86_FAMILY_ANY, NO_MELTDOWN | NO_L1TF | NO_MDS), 1083 VULNWL_HYGON(X86_FAMILY_ANY, NO_MELTDOWN | NO_L1TF | NO_MDS | NO_SWAPGS),
1075 {} 1084 {}
1076}; 1085};
1077 1086
@@ -1108,6 +1117,9 @@ static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c)
1108 setup_force_cpu_bug(X86_BUG_MSBDS_ONLY); 1117 setup_force_cpu_bug(X86_BUG_MSBDS_ONLY);
1109 } 1118 }
1110 1119
1120 if (!cpu_matches(NO_SWAPGS))
1121 setup_force_cpu_bug(X86_BUG_SWAPGS);
1122
1111 if (cpu_matches(NO_MELTDOWN)) 1123 if (cpu_matches(NO_MELTDOWN))
1112 return; 1124 return;
1113 1125
diff --git a/arch/x86/kernel/cpu/mtrr/cyrix.c b/arch/x86/kernel/cpu/mtrr/cyrix.c
index 4296c702a3f7..72182809b333 100644
--- a/arch/x86/kernel/cpu/mtrr/cyrix.c
+++ b/arch/x86/kernel/cpu/mtrr/cyrix.c
@@ -98,6 +98,7 @@ cyrix_get_free_region(unsigned long base, unsigned long size, int replace_reg)
98 case 7: 98 case 7:
99 if (size < 0x40) 99 if (size < 0x40)
100 break; 100 break;
101 /* Else, fall through */
101 case 6: 102 case 6:
102 case 5: 103 case 5:
103 case 4: 104 case 4:
diff --git a/arch/x86/kernel/cpu/umwait.c b/arch/x86/kernel/cpu/umwait.c
index 6a204e7336c1..32b4dc9030aa 100644
--- a/arch/x86/kernel/cpu/umwait.c
+++ b/arch/x86/kernel/cpu/umwait.c
@@ -18,6 +18,12 @@
18static u32 umwait_control_cached = UMWAIT_CTRL_VAL(100000, UMWAIT_C02_ENABLE); 18static u32 umwait_control_cached = UMWAIT_CTRL_VAL(100000, UMWAIT_C02_ENABLE);
19 19
20/* 20/*
21 * Cache the original IA32_UMWAIT_CONTROL MSR value which is configured by
22 * hardware or BIOS before kernel boot.
23 */
24static u32 orig_umwait_control_cached __ro_after_init;
25
26/*
21 * Serialize access to umwait_control_cached and IA32_UMWAIT_CONTROL MSR in 27 * Serialize access to umwait_control_cached and IA32_UMWAIT_CONTROL MSR in
22 * the sysfs write functions. 28 * the sysfs write functions.
23 */ 29 */
@@ -53,6 +59,23 @@ static int umwait_cpu_online(unsigned int cpu)
53} 59}
54 60
55/* 61/*
62 * The CPU hotplug callback sets the control MSR to the original control
63 * value.
64 */
65static int umwait_cpu_offline(unsigned int cpu)
66{
67 /*
68 * This code is protected by the CPU hotplug already and
69 * orig_umwait_control_cached is never changed after it caches
70 * the original control MSR value in umwait_init(). So there
71 * is no race condition here.
72 */
73 wrmsr(MSR_IA32_UMWAIT_CONTROL, orig_umwait_control_cached, 0);
74
75 return 0;
76}
77
78/*
56 * On resume, restore IA32_UMWAIT_CONTROL MSR on the boot processor which 79 * On resume, restore IA32_UMWAIT_CONTROL MSR on the boot processor which
57 * is the only active CPU at this time. The MSR is set up on the APs via the 80 * is the only active CPU at this time. The MSR is set up on the APs via the
58 * CPU hotplug callback. 81 * CPU hotplug callback.
@@ -185,8 +208,22 @@ static int __init umwait_init(void)
185 if (!boot_cpu_has(X86_FEATURE_WAITPKG)) 208 if (!boot_cpu_has(X86_FEATURE_WAITPKG))
186 return -ENODEV; 209 return -ENODEV;
187 210
211 /*
212 * Cache the original control MSR value before the control MSR is
213 * changed. This is the only place where orig_umwait_control_cached
214 * is modified.
215 */
216 rdmsrl(MSR_IA32_UMWAIT_CONTROL, orig_umwait_control_cached);
217
188 ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "umwait:online", 218 ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "umwait:online",
189 umwait_cpu_online, NULL); 219 umwait_cpu_online, umwait_cpu_offline);
220 if (ret < 0) {
221 /*
222 * On failure, the control MSR on all CPUs has the
223 * original control value.
224 */
225 return ret;
226 }
190 227
191 register_syscore_ops(&umwait_syscore_ops); 228 register_syscore_ops(&umwait_syscore_ops);
192 229
diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S
index a6342c899be5..f3d3e9646a99 100644
--- a/arch/x86/kernel/head_64.S
+++ b/arch/x86/kernel/head_64.S
@@ -193,10 +193,10 @@ ENTRY(secondary_startup_64)
193 193
194 /* Set up %gs. 194 /* Set up %gs.
195 * 195 *
196 * The base of %gs always points to the bottom of the irqstack 196 * The base of %gs always points to fixed_percpu_data. If the
197 * union. If the stack protector canary is enabled, it is 197 * stack protector canary is enabled, it is located at %gs:40.
198 * located at %gs:40. Note that, on SMP, the boot cpu uses 198 * Note that, on SMP, the boot cpu uses init data section until
199 * init data section till per cpu areas are set up. 199 * the per cpu areas are set up.
200 */ 200 */
201 movl $MSR_GS_BASE,%ecx 201 movl $MSR_GS_BASE,%ecx
202 movl initial_gs(%rip),%eax 202 movl initial_gs(%rip),%eax
diff --git a/arch/x86/kernel/hpet.c b/arch/x86/kernel/hpet.c
index c43e96a938d0..c6f791bc481e 100644
--- a/arch/x86/kernel/hpet.c
+++ b/arch/x86/kernel/hpet.c
@@ -827,10 +827,6 @@ int __init hpet_enable(void)
827 if (!hpet_cfg_working()) 827 if (!hpet_cfg_working())
828 goto out_nohpet; 828 goto out_nohpet;
829 829
830 /* Validate that the counter is counting */
831 if (!hpet_counting())
832 goto out_nohpet;
833
834 /* 830 /*
835 * Read the period and check for a sane value: 831 * Read the period and check for a sane value:
836 */ 832 */
@@ -896,6 +892,14 @@ int __init hpet_enable(void)
896 } 892 }
897 hpet_print_config(); 893 hpet_print_config();
898 894
895 /*
896 * Validate that the counter is counting. This needs to be done
897 * after sanitizing the config registers to properly deal with
898 * force enabled HPETs.
899 */
900 if (!hpet_counting())
901 goto out_nohpet;
902
899 clocksource_register_hz(&clocksource_hpet, (u32)hpet_freq); 903 clocksource_register_hz(&clocksource_hpet, (u32)hpet_freq);
900 904
901 if (id & HPET_ID_LEGSUP) { 905 if (id & HPET_ID_LEGSUP) {
diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c
index b7f34fe2171e..4ab377c9fffe 100644
--- a/arch/x86/kernel/kvm.c
+++ b/arch/x86/kernel/kvm.c
@@ -308,9 +308,6 @@ static notrace void kvm_guest_apic_eoi_write(u32 reg, u32 val)
308 308
309static void kvm_guest_cpu_init(void) 309static void kvm_guest_cpu_init(void)
310{ 310{
311 if (!kvm_para_available())
312 return;
313
314 if (kvm_para_has_feature(KVM_FEATURE_ASYNC_PF) && kvmapf) { 311 if (kvm_para_has_feature(KVM_FEATURE_ASYNC_PF) && kvmapf) {
315 u64 pa = slow_virt_to_phys(this_cpu_ptr(&apf_reason)); 312 u64 pa = slow_virt_to_phys(this_cpu_ptr(&apf_reason));
316 313
@@ -625,9 +622,6 @@ static void __init kvm_guest_init(void)
625{ 622{
626 int i; 623 int i;
627 624
628 if (!kvm_para_available())
629 return;
630
631 paravirt_ops_setup(); 625 paravirt_ops_setup();
632 register_reboot_notifier(&kvm_pv_reboot_nb); 626 register_reboot_notifier(&kvm_pv_reboot_nb);
633 for (i = 0; i < KVM_TASK_SLEEP_HASHSIZE; i++) 627 for (i = 0; i < KVM_TASK_SLEEP_HASHSIZE; i++)
@@ -848,8 +842,6 @@ asm(
848 */ 842 */
849void __init kvm_spinlock_init(void) 843void __init kvm_spinlock_init(void)
850{ 844{
851 if (!kvm_para_available())
852 return;
853 /* Does host kernel support KVM_FEATURE_PV_UNHALT? */ 845 /* Does host kernel support KVM_FEATURE_PV_UNHALT? */
854 if (!kvm_para_has_feature(KVM_FEATURE_PV_UNHALT)) 846 if (!kvm_para_has_feature(KVM_FEATURE_PV_UNHALT))
855 return; 847 return;
diff --git a/arch/x86/kernel/ptrace.c b/arch/x86/kernel/ptrace.c
index 0fdbe89d0754..3c5bbe8e4120 100644
--- a/arch/x86/kernel/ptrace.c
+++ b/arch/x86/kernel/ptrace.c
@@ -201,6 +201,7 @@ static int set_segment_reg(struct task_struct *task,
201 case offsetof(struct user_regs_struct, ss): 201 case offsetof(struct user_regs_struct, ss):
202 if (unlikely(value == 0)) 202 if (unlikely(value == 0))
203 return -EIO; 203 return -EIO;
204 /* Else, fall through */
204 205
205 default: 206 default:
206 *pt_regs_access(task_pt_regs(task), offset) = value; 207 *pt_regs_access(task_pt_regs(task), offset) = value;
diff --git a/arch/x86/kernel/stacktrace.c b/arch/x86/kernel/stacktrace.c
index 4f36d3241faf..2d6898c2cb64 100644
--- a/arch/x86/kernel/stacktrace.c
+++ b/arch/x86/kernel/stacktrace.c
@@ -100,7 +100,7 @@ copy_stack_frame(const void __user *fp, struct stack_frame_user *frame)
100{ 100{
101 int ret; 101 int ret;
102 102
103 if (!access_ok(fp, sizeof(*frame))) 103 if (__range_not_ok(fp, sizeof(*frame), TASK_SIZE))
104 return 0; 104 return 0;
105 105
106 ret = 1; 106 ret = 1;
diff --git a/arch/x86/kernel/sysfb_efi.c b/arch/x86/kernel/sysfb_efi.c
index 8eb67a670b10..653b7f617b61 100644
--- a/arch/x86/kernel/sysfb_efi.c
+++ b/arch/x86/kernel/sysfb_efi.c
@@ -230,9 +230,55 @@ static const struct dmi_system_id efifb_dmi_system_table[] __initconst = {
230 {}, 230 {},
231}; 231};
232 232
233/*
234 * Some devices have a portrait LCD but advertise a landscape resolution (and
235 * pitch). We simply swap width and height for these devices so that we can
236 * correctly deal with some of them coming with multiple resolutions.
237 */
238static const struct dmi_system_id efifb_dmi_swap_width_height[] __initconst = {
239 {
240 /*
241 * Lenovo MIIX310-10ICR, only some batches have the troublesome
242 * 800x1280 portrait screen. Luckily the portrait version has
243 * its own BIOS version, so we match on that.
244 */
245 .matches = {
246 DMI_EXACT_MATCH(DMI_SYS_VENDOR, "LENOVO"),
247 DMI_EXACT_MATCH(DMI_PRODUCT_VERSION, "MIIX 310-10ICR"),
248 DMI_EXACT_MATCH(DMI_BIOS_VERSION, "1HCN44WW"),
249 },
250 },
251 {
252 /* Lenovo MIIX 320-10ICR with 800x1280 portrait screen */
253 .matches = {
254 DMI_EXACT_MATCH(DMI_SYS_VENDOR, "LENOVO"),
255 DMI_EXACT_MATCH(DMI_PRODUCT_VERSION,
256 "Lenovo MIIX 320-10ICR"),
257 },
258 },
259 {
260 /* Lenovo D330 with 800x1280 or 1200x1920 portrait screen */
261 .matches = {
262 DMI_EXACT_MATCH(DMI_SYS_VENDOR, "LENOVO"),
263 DMI_EXACT_MATCH(DMI_PRODUCT_VERSION,
264 "Lenovo ideapad D330-10IGM"),
265 },
266 },
267 {},
268};
269
233__init void sysfb_apply_efi_quirks(void) 270__init void sysfb_apply_efi_quirks(void)
234{ 271{
235 if (screen_info.orig_video_isVGA != VIDEO_TYPE_EFI || 272 if (screen_info.orig_video_isVGA != VIDEO_TYPE_EFI ||
236 !(screen_info.capabilities & VIDEO_CAPABILITY_SKIP_QUIRKS)) 273 !(screen_info.capabilities & VIDEO_CAPABILITY_SKIP_QUIRKS))
237 dmi_check_system(efifb_dmi_system_table); 274 dmi_check_system(efifb_dmi_system_table);
275
276 if (screen_info.orig_video_isVGA == VIDEO_TYPE_EFI &&
277 dmi_check_system(efifb_dmi_swap_width_height)) {
278 u16 temp = screen_info.lfb_width;
279
280 screen_info.lfb_width = screen_info.lfb_height;
281 screen_info.lfb_height = temp;
282 screen_info.lfb_linelength = 4 * screen_info.lfb_width;
283 }
238} 284}
diff --git a/arch/x86/kernel/uprobes.c b/arch/x86/kernel/uprobes.c
index d8359ebeea70..8cd745ef8c7b 100644
--- a/arch/x86/kernel/uprobes.c
+++ b/arch/x86/kernel/uprobes.c
@@ -508,9 +508,12 @@ struct uprobe_xol_ops {
508 void (*abort)(struct arch_uprobe *, struct pt_regs *); 508 void (*abort)(struct arch_uprobe *, struct pt_regs *);
509}; 509};
510 510
511static inline int sizeof_long(void) 511static inline int sizeof_long(struct pt_regs *regs)
512{ 512{
513 return in_ia32_syscall() ? 4 : 8; 513 /*
514 * Check registers for mode as in_xxx_syscall() does not apply here.
515 */
516 return user_64bit_mode(regs) ? 8 : 4;
514} 517}
515 518
516static int default_pre_xol_op(struct arch_uprobe *auprobe, struct pt_regs *regs) 519static int default_pre_xol_op(struct arch_uprobe *auprobe, struct pt_regs *regs)
@@ -521,9 +524,9 @@ static int default_pre_xol_op(struct arch_uprobe *auprobe, struct pt_regs *regs)
521 524
522static int emulate_push_stack(struct pt_regs *regs, unsigned long val) 525static int emulate_push_stack(struct pt_regs *regs, unsigned long val)
523{ 526{
524 unsigned long new_sp = regs->sp - sizeof_long(); 527 unsigned long new_sp = regs->sp - sizeof_long(regs);
525 528
526 if (copy_to_user((void __user *)new_sp, &val, sizeof_long())) 529 if (copy_to_user((void __user *)new_sp, &val, sizeof_long(regs)))
527 return -EFAULT; 530 return -EFAULT;
528 531
529 regs->sp = new_sp; 532 regs->sp = new_sp;
@@ -556,7 +559,7 @@ static int default_post_xol_op(struct arch_uprobe *auprobe, struct pt_regs *regs
556 long correction = utask->vaddr - utask->xol_vaddr; 559 long correction = utask->vaddr - utask->xol_vaddr;
557 regs->ip += correction; 560 regs->ip += correction;
558 } else if (auprobe->defparam.fixups & UPROBE_FIX_CALL) { 561 } else if (auprobe->defparam.fixups & UPROBE_FIX_CALL) {
559 regs->sp += sizeof_long(); /* Pop incorrect return address */ 562 regs->sp += sizeof_long(regs); /* Pop incorrect return address */
560 if (emulate_push_stack(regs, utask->vaddr + auprobe->defparam.ilen)) 563 if (emulate_push_stack(regs, utask->vaddr + auprobe->defparam.ilen))
561 return -ERESTART; 564 return -ERESTART;
562 } 565 }
@@ -675,7 +678,7 @@ static int branch_post_xol_op(struct arch_uprobe *auprobe, struct pt_regs *regs)
675 * "call" insn was executed out-of-line. Just restore ->sp and restart. 678 * "call" insn was executed out-of-line. Just restore ->sp and restart.
676 * We could also restore ->ip and try to call branch_emulate_op() again. 679 * We could also restore ->ip and try to call branch_emulate_op() again.
677 */ 680 */
678 regs->sp += sizeof_long(); 681 regs->sp += sizeof_long(regs);
679 return -ERESTART; 682 return -ERESTART;
680} 683}
681 684
@@ -1056,7 +1059,7 @@ bool arch_uprobe_skip_sstep(struct arch_uprobe *auprobe, struct pt_regs *regs)
1056unsigned long 1059unsigned long
1057arch_uretprobe_hijack_return_addr(unsigned long trampoline_vaddr, struct pt_regs *regs) 1060arch_uretprobe_hijack_return_addr(unsigned long trampoline_vaddr, struct pt_regs *regs)
1058{ 1061{
1059 int rasize = sizeof_long(), nleft; 1062 int rasize = sizeof_long(regs), nleft;
1060 unsigned long orig_ret_vaddr = 0; /* clear high bits for 32-bit apps */ 1063 unsigned long orig_ret_vaddr = 0; /* clear high bits for 32-bit apps */
1061 1064
1062 if (copy_from_user(&orig_ret_vaddr, (void __user *)regs->sp, rasize)) 1065 if (copy_from_user(&orig_ret_vaddr, (void __user *)regs->sp, rasize))
diff --git a/arch/x86/kvm/debugfs.c b/arch/x86/kvm/debugfs.c
index 329361b69d5e..018aebce33ff 100644
--- a/arch/x86/kvm/debugfs.c
+++ b/arch/x86/kvm/debugfs.c
@@ -8,11 +8,6 @@
8#include <linux/debugfs.h> 8#include <linux/debugfs.h>
9#include "lapic.h" 9#include "lapic.h"
10 10
11bool kvm_arch_has_vcpu_debugfs(void)
12{
13 return true;
14}
15
16static int vcpu_get_timer_advance_ns(void *data, u64 *val) 11static int vcpu_get_timer_advance_ns(void *data, u64 *val)
17{ 12{
18 struct kvm_vcpu *vcpu = (struct kvm_vcpu *) data; 13 struct kvm_vcpu *vcpu = (struct kvm_vcpu *) data;
@@ -48,37 +43,22 @@ static int vcpu_get_tsc_scaling_frac_bits(void *data, u64 *val)
48 43
49DEFINE_SIMPLE_ATTRIBUTE(vcpu_tsc_scaling_frac_fops, vcpu_get_tsc_scaling_frac_bits, NULL, "%llu\n"); 44DEFINE_SIMPLE_ATTRIBUTE(vcpu_tsc_scaling_frac_fops, vcpu_get_tsc_scaling_frac_bits, NULL, "%llu\n");
50 45
51int kvm_arch_create_vcpu_debugfs(struct kvm_vcpu *vcpu) 46void kvm_arch_create_vcpu_debugfs(struct kvm_vcpu *vcpu)
52{ 47{
53 struct dentry *ret; 48 debugfs_create_file("tsc-offset", 0444, vcpu->debugfs_dentry, vcpu,
54 49 &vcpu_tsc_offset_fops);
55 ret = debugfs_create_file("tsc-offset", 0444,
56 vcpu->debugfs_dentry,
57 vcpu, &vcpu_tsc_offset_fops);
58 if (!ret)
59 return -ENOMEM;
60 50
61 if (lapic_in_kernel(vcpu)) { 51 if (lapic_in_kernel(vcpu))
62 ret = debugfs_create_file("lapic_timer_advance_ns", 0444, 52 debugfs_create_file("lapic_timer_advance_ns", 0444,
63 vcpu->debugfs_dentry, 53 vcpu->debugfs_dentry, vcpu,
64 vcpu, &vcpu_timer_advance_ns_fops); 54 &vcpu_timer_advance_ns_fops);
65 if (!ret)
66 return -ENOMEM;
67 }
68 55
69 if (kvm_has_tsc_control) { 56 if (kvm_has_tsc_control) {
70 ret = debugfs_create_file("tsc-scaling-ratio", 0444, 57 debugfs_create_file("tsc-scaling-ratio", 0444,
71 vcpu->debugfs_dentry, 58 vcpu->debugfs_dentry, vcpu,
72 vcpu, &vcpu_tsc_scaling_fops); 59 &vcpu_tsc_scaling_fops);
73 if (!ret) 60 debugfs_create_file("tsc-scaling-ratio-frac-bits", 0444,
74 return -ENOMEM; 61 vcpu->debugfs_dentry, vcpu,
75 ret = debugfs_create_file("tsc-scaling-ratio-frac-bits", 0444, 62 &vcpu_tsc_scaling_frac_fops);
76 vcpu->debugfs_dentry,
77 vcpu, &vcpu_tsc_scaling_frac_fops);
78 if (!ret)
79 return -ENOMEM;
80
81 } 63 }
82
83 return 0;
84} 64}
diff --git a/arch/x86/kvm/hyperv.c b/arch/x86/kvm/hyperv.c
index c10a8b10b203..fff790a3f4ee 100644
--- a/arch/x86/kvm/hyperv.c
+++ b/arch/x86/kvm/hyperv.c
@@ -1781,7 +1781,7 @@ int kvm_vm_ioctl_hv_eventfd(struct kvm *kvm, struct kvm_hyperv_eventfd *args)
1781int kvm_vcpu_ioctl_get_hv_cpuid(struct kvm_vcpu *vcpu, struct kvm_cpuid2 *cpuid, 1781int kvm_vcpu_ioctl_get_hv_cpuid(struct kvm_vcpu *vcpu, struct kvm_cpuid2 *cpuid,
1782 struct kvm_cpuid_entry2 __user *entries) 1782 struct kvm_cpuid_entry2 __user *entries)
1783{ 1783{
1784 uint16_t evmcs_ver = kvm_x86_ops->nested_get_evmcs_version(vcpu); 1784 uint16_t evmcs_ver = 0;
1785 struct kvm_cpuid_entry2 cpuid_entries[] = { 1785 struct kvm_cpuid_entry2 cpuid_entries[] = {
1786 { .function = HYPERV_CPUID_VENDOR_AND_MAX_FUNCTIONS }, 1786 { .function = HYPERV_CPUID_VENDOR_AND_MAX_FUNCTIONS },
1787 { .function = HYPERV_CPUID_INTERFACE }, 1787 { .function = HYPERV_CPUID_INTERFACE },
@@ -1793,6 +1793,9 @@ int kvm_vcpu_ioctl_get_hv_cpuid(struct kvm_vcpu *vcpu, struct kvm_cpuid2 *cpuid,
1793 }; 1793 };
1794 int i, nent = ARRAY_SIZE(cpuid_entries); 1794 int i, nent = ARRAY_SIZE(cpuid_entries);
1795 1795
1796 if (kvm_x86_ops->nested_get_evmcs_version)
1797 evmcs_ver = kvm_x86_ops->nested_get_evmcs_version(vcpu);
1798
1796 /* Skip NESTED_FEATURES if eVMCS is not supported */ 1799 /* Skip NESTED_FEATURES if eVMCS is not supported */
1797 if (!evmcs_ver) 1800 if (!evmcs_ver)
1798 --nent; 1801 --nent;
diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
index 0aa158657f20..e904ff06a83d 100644
--- a/arch/x86/kvm/lapic.c
+++ b/arch/x86/kvm/lapic.c
@@ -216,6 +216,9 @@ static void recalculate_apic_map(struct kvm *kvm)
216 if (!apic_x2apic_mode(apic) && !new->phys_map[xapic_id]) 216 if (!apic_x2apic_mode(apic) && !new->phys_map[xapic_id])
217 new->phys_map[xapic_id] = apic; 217 new->phys_map[xapic_id] = apic;
218 218
219 if (!kvm_apic_sw_enabled(apic))
220 continue;
221
219 ldr = kvm_lapic_get_reg(apic, APIC_LDR); 222 ldr = kvm_lapic_get_reg(apic, APIC_LDR);
220 223
221 if (apic_x2apic_mode(apic)) { 224 if (apic_x2apic_mode(apic)) {
@@ -258,6 +261,8 @@ static inline void apic_set_spiv(struct kvm_lapic *apic, u32 val)
258 static_key_slow_dec_deferred(&apic_sw_disabled); 261 static_key_slow_dec_deferred(&apic_sw_disabled);
259 else 262 else
260 static_key_slow_inc(&apic_sw_disabled.key); 263 static_key_slow_inc(&apic_sw_disabled.key);
264
265 recalculate_apic_map(apic->vcpu->kvm);
261 } 266 }
262} 267}
263 268
@@ -1548,7 +1553,6 @@ static void kvm_apic_inject_pending_timer_irqs(struct kvm_lapic *apic)
1548static void apic_timer_expired(struct kvm_lapic *apic) 1553static void apic_timer_expired(struct kvm_lapic *apic)
1549{ 1554{
1550 struct kvm_vcpu *vcpu = apic->vcpu; 1555 struct kvm_vcpu *vcpu = apic->vcpu;
1551 struct swait_queue_head *q = &vcpu->wq;
1552 struct kvm_timer *ktimer = &apic->lapic_timer; 1556 struct kvm_timer *ktimer = &apic->lapic_timer;
1553 1557
1554 if (atomic_read(&apic->lapic_timer.pending)) 1558 if (atomic_read(&apic->lapic_timer.pending))
@@ -1566,13 +1570,6 @@ static void apic_timer_expired(struct kvm_lapic *apic)
1566 1570
1567 atomic_inc(&apic->lapic_timer.pending); 1571 atomic_inc(&apic->lapic_timer.pending);
1568 kvm_set_pending_timer(vcpu); 1572 kvm_set_pending_timer(vcpu);
1569
1570 /*
1571 * For x86, the atomic_inc() is serialized, thus
1572 * using swait_active() is safe.
1573 */
1574 if (swait_active(q))
1575 swake_up_one(q);
1576} 1573}
1577 1574
1578static void start_sw_tscdeadline(struct kvm_lapic *apic) 1575static void start_sw_tscdeadline(struct kvm_lapic *apic)
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 8f72526e2f68..218b277bfda3 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -3466,7 +3466,7 @@ static bool fast_page_fault(struct kvm_vcpu *vcpu, gva_t gva, int level,
3466 /* 3466 /*
3467 * Currently, fast page fault only works for direct mapping 3467 * Currently, fast page fault only works for direct mapping
3468 * since the gfn is not stable for indirect shadow page. See 3468 * since the gfn is not stable for indirect shadow page. See
3469 * Documentation/virtual/kvm/locking.txt to get more detail. 3469 * Documentation/virt/kvm/locking.txt to get more detail.
3470 */ 3470 */
3471 fault_handled = fast_pf_fix_direct_spte(vcpu, sp, 3471 fault_handled = fast_pf_fix_direct_spte(vcpu, sp,
3472 iterator.sptep, spte, 3472 iterator.sptep, spte,
@@ -5653,38 +5653,7 @@ static void kvm_mmu_invalidate_zap_pages_in_memslot(struct kvm *kvm,
5653 struct kvm_memory_slot *slot, 5653 struct kvm_memory_slot *slot,
5654 struct kvm_page_track_notifier_node *node) 5654 struct kvm_page_track_notifier_node *node)
5655{ 5655{
5656 struct kvm_mmu_page *sp; 5656 kvm_mmu_zap_all(kvm);
5657 LIST_HEAD(invalid_list);
5658 unsigned long i;
5659 bool flush;
5660 gfn_t gfn;
5661
5662 spin_lock(&kvm->mmu_lock);
5663
5664 if (list_empty(&kvm->arch.active_mmu_pages))
5665 goto out_unlock;
5666
5667 flush = slot_handle_all_level(kvm, slot, kvm_zap_rmapp, false);
5668
5669 for (i = 0; i < slot->npages; i++) {
5670 gfn = slot->base_gfn + i;
5671
5672 for_each_valid_sp(kvm, sp, gfn) {
5673 if (sp->gfn != gfn)
5674 continue;
5675
5676 kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list);
5677 }
5678 if (need_resched() || spin_needbreak(&kvm->mmu_lock)) {
5679 kvm_mmu_remote_flush_or_zap(kvm, &invalid_list, flush);
5680 flush = false;
5681 cond_resched_lock(&kvm->mmu_lock);
5682 }
5683 }
5684 kvm_mmu_remote_flush_or_zap(kvm, &invalid_list, flush);
5685
5686out_unlock:
5687 spin_unlock(&kvm->mmu_lock);
5688} 5657}
5689 5658
5690void kvm_mmu_init_vm(struct kvm *kvm) 5659void kvm_mmu_init_vm(struct kvm *kvm)
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index 19f69df96758..e0368076a1ef 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -1714,7 +1714,6 @@ static int avic_init_backing_page(struct kvm_vcpu *vcpu)
1714 if (!entry) 1714 if (!entry)
1715 return -EINVAL; 1715 return -EINVAL;
1716 1716
1717 new_entry = READ_ONCE(*entry);
1718 new_entry = __sme_set((page_to_phys(svm->avic_backing_page) & 1717 new_entry = __sme_set((page_to_phys(svm->avic_backing_page) &
1719 AVIC_PHYSICAL_ID_ENTRY_BACKING_PAGE_MASK) | 1718 AVIC_PHYSICAL_ID_ENTRY_BACKING_PAGE_MASK) |
1720 AVIC_PHYSICAL_ID_ENTRY_VALID_MASK); 1719 AVIC_PHYSICAL_ID_ENTRY_VALID_MASK);
@@ -2143,12 +2142,20 @@ static struct kvm_vcpu *svm_create_vcpu(struct kvm *kvm, unsigned int id)
2143 goto out; 2142 goto out;
2144 } 2143 }
2145 2144
2145 svm->vcpu.arch.user_fpu = kmem_cache_zalloc(x86_fpu_cache,
2146 GFP_KERNEL_ACCOUNT);
2147 if (!svm->vcpu.arch.user_fpu) {
2148 printk(KERN_ERR "kvm: failed to allocate kvm userspace's fpu\n");
2149 err = -ENOMEM;
2150 goto free_partial_svm;
2151 }
2152
2146 svm->vcpu.arch.guest_fpu = kmem_cache_zalloc(x86_fpu_cache, 2153 svm->vcpu.arch.guest_fpu = kmem_cache_zalloc(x86_fpu_cache,
2147 GFP_KERNEL_ACCOUNT); 2154 GFP_KERNEL_ACCOUNT);
2148 if (!svm->vcpu.arch.guest_fpu) { 2155 if (!svm->vcpu.arch.guest_fpu) {
2149 printk(KERN_ERR "kvm: failed to allocate vcpu's fpu\n"); 2156 printk(KERN_ERR "kvm: failed to allocate vcpu's fpu\n");
2150 err = -ENOMEM; 2157 err = -ENOMEM;
2151 goto free_partial_svm; 2158 goto free_user_fpu;
2152 } 2159 }
2153 2160
2154 err = kvm_vcpu_init(&svm->vcpu, kvm, id); 2161 err = kvm_vcpu_init(&svm->vcpu, kvm, id);
@@ -2211,6 +2218,8 @@ uninit:
2211 kvm_vcpu_uninit(&svm->vcpu); 2218 kvm_vcpu_uninit(&svm->vcpu);
2212free_svm: 2219free_svm:
2213 kmem_cache_free(x86_fpu_cache, svm->vcpu.arch.guest_fpu); 2220 kmem_cache_free(x86_fpu_cache, svm->vcpu.arch.guest_fpu);
2221free_user_fpu:
2222 kmem_cache_free(x86_fpu_cache, svm->vcpu.arch.user_fpu);
2214free_partial_svm: 2223free_partial_svm:
2215 kmem_cache_free(kvm_vcpu_cache, svm); 2224 kmem_cache_free(kvm_vcpu_cache, svm);
2216out: 2225out:
@@ -2241,6 +2250,7 @@ static void svm_free_vcpu(struct kvm_vcpu *vcpu)
2241 __free_page(virt_to_page(svm->nested.hsave)); 2250 __free_page(virt_to_page(svm->nested.hsave));
2242 __free_pages(virt_to_page(svm->nested.msrpm), MSRPM_ALLOC_ORDER); 2251 __free_pages(virt_to_page(svm->nested.msrpm), MSRPM_ALLOC_ORDER);
2243 kvm_vcpu_uninit(vcpu); 2252 kvm_vcpu_uninit(vcpu);
2253 kmem_cache_free(x86_fpu_cache, svm->vcpu.arch.user_fpu);
2244 kmem_cache_free(x86_fpu_cache, svm->vcpu.arch.guest_fpu); 2254 kmem_cache_free(x86_fpu_cache, svm->vcpu.arch.guest_fpu);
2245 kmem_cache_free(kvm_vcpu_cache, svm); 2255 kmem_cache_free(kvm_vcpu_cache, svm);
2246} 2256}
@@ -5179,6 +5189,11 @@ static void svm_deliver_avic_intr(struct kvm_vcpu *vcpu, int vec)
5179 kvm_vcpu_wake_up(vcpu); 5189 kvm_vcpu_wake_up(vcpu);
5180} 5190}
5181 5191
5192static bool svm_dy_apicv_has_pending_interrupt(struct kvm_vcpu *vcpu)
5193{
5194 return false;
5195}
5196
5182static void svm_ir_list_del(struct vcpu_svm *svm, struct amd_iommu_pi_data *pi) 5197static void svm_ir_list_del(struct vcpu_svm *svm, struct amd_iommu_pi_data *pi)
5183{ 5198{
5184 unsigned long flags; 5199 unsigned long flags;
@@ -7113,12 +7128,6 @@ failed:
7113 return ret; 7128 return ret;
7114} 7129}
7115 7130
7116static uint16_t nested_get_evmcs_version(struct kvm_vcpu *vcpu)
7117{
7118 /* Not supported */
7119 return 0;
7120}
7121
7122static int nested_enable_evmcs(struct kvm_vcpu *vcpu, 7131static int nested_enable_evmcs(struct kvm_vcpu *vcpu,
7123 uint16_t *vmcs_version) 7132 uint16_t *vmcs_version)
7124{ 7133{
@@ -7303,6 +7312,7 @@ static struct kvm_x86_ops svm_x86_ops __ro_after_init = {
7303 7312
7304 .pmu_ops = &amd_pmu_ops, 7313 .pmu_ops = &amd_pmu_ops,
7305 .deliver_posted_interrupt = svm_deliver_avic_intr, 7314 .deliver_posted_interrupt = svm_deliver_avic_intr,
7315 .dy_apicv_has_pending_interrupt = svm_dy_apicv_has_pending_interrupt,
7306 .update_pi_irte = svm_update_pi_irte, 7316 .update_pi_irte = svm_update_pi_irte,
7307 .setup_mce = svm_setup_mce, 7317 .setup_mce = svm_setup_mce,
7308 7318
@@ -7316,7 +7326,7 @@ static struct kvm_x86_ops svm_x86_ops __ro_after_init = {
7316 .mem_enc_unreg_region = svm_unregister_enc_region, 7326 .mem_enc_unreg_region = svm_unregister_enc_region,
7317 7327
7318 .nested_enable_evmcs = nested_enable_evmcs, 7328 .nested_enable_evmcs = nested_enable_evmcs,
7319 .nested_get_evmcs_version = nested_get_evmcs_version, 7329 .nested_get_evmcs_version = NULL,
7320 7330
7321 .need_emulation_on_page_fault = svm_need_emulation_on_page_fault, 7331 .need_emulation_on_page_fault = svm_need_emulation_on_page_fault,
7322}; 7332};
diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c
index 0f1378789bd0..ced9fba32598 100644
--- a/arch/x86/kvm/vmx/nested.c
+++ b/arch/x86/kvm/vmx/nested.c
@@ -220,6 +220,8 @@ static void free_nested(struct kvm_vcpu *vcpu)
220 if (!vmx->nested.vmxon && !vmx->nested.smm.vmxon) 220 if (!vmx->nested.vmxon && !vmx->nested.smm.vmxon)
221 return; 221 return;
222 222
223 kvm_clear_request(KVM_REQ_GET_VMCS12_PAGES, vcpu);
224
223 vmx->nested.vmxon = false; 225 vmx->nested.vmxon = false;
224 vmx->nested.smm.vmxon = false; 226 vmx->nested.smm.vmxon = false;
225 free_vpid(vmx->nested.vpid02); 227 free_vpid(vmx->nested.vpid02);
@@ -232,7 +234,9 @@ static void free_nested(struct kvm_vcpu *vcpu)
232 vmx->vmcs01.shadow_vmcs = NULL; 234 vmx->vmcs01.shadow_vmcs = NULL;
233 } 235 }
234 kfree(vmx->nested.cached_vmcs12); 236 kfree(vmx->nested.cached_vmcs12);
237 vmx->nested.cached_vmcs12 = NULL;
235 kfree(vmx->nested.cached_shadow_vmcs12); 238 kfree(vmx->nested.cached_shadow_vmcs12);
239 vmx->nested.cached_shadow_vmcs12 = NULL;
236 /* Unpin physical memory we referred to in the vmcs02 */ 240 /* Unpin physical memory we referred to in the vmcs02 */
237 if (vmx->nested.apic_access_page) { 241 if (vmx->nested.apic_access_page) {
238 kvm_release_page_dirty(vmx->nested.apic_access_page); 242 kvm_release_page_dirty(vmx->nested.apic_access_page);
diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
index a279447eb75b..c030c96fc81a 100644
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -6117,6 +6117,11 @@ static int vmx_sync_pir_to_irr(struct kvm_vcpu *vcpu)
6117 return max_irr; 6117 return max_irr;
6118} 6118}
6119 6119
6120static bool vmx_dy_apicv_has_pending_interrupt(struct kvm_vcpu *vcpu)
6121{
6122 return pi_test_on(vcpu_to_pi_desc(vcpu));
6123}
6124
6120static void vmx_load_eoi_exitmap(struct kvm_vcpu *vcpu, u64 *eoi_exit_bitmap) 6125static void vmx_load_eoi_exitmap(struct kvm_vcpu *vcpu, u64 *eoi_exit_bitmap)
6121{ 6126{
6122 if (!kvm_vcpu_apicv_active(vcpu)) 6127 if (!kvm_vcpu_apicv_active(vcpu))
@@ -6598,6 +6603,7 @@ static void vmx_free_vcpu(struct kvm_vcpu *vcpu)
6598 free_loaded_vmcs(vmx->loaded_vmcs); 6603 free_loaded_vmcs(vmx->loaded_vmcs);
6599 kfree(vmx->guest_msrs); 6604 kfree(vmx->guest_msrs);
6600 kvm_vcpu_uninit(vcpu); 6605 kvm_vcpu_uninit(vcpu);
6606 kmem_cache_free(x86_fpu_cache, vmx->vcpu.arch.user_fpu);
6601 kmem_cache_free(x86_fpu_cache, vmx->vcpu.arch.guest_fpu); 6607 kmem_cache_free(x86_fpu_cache, vmx->vcpu.arch.guest_fpu);
6602 kmem_cache_free(kvm_vcpu_cache, vmx); 6608 kmem_cache_free(kvm_vcpu_cache, vmx);
6603} 6609}
@@ -6613,12 +6619,20 @@ static struct kvm_vcpu *vmx_create_vcpu(struct kvm *kvm, unsigned int id)
6613 if (!vmx) 6619 if (!vmx)
6614 return ERR_PTR(-ENOMEM); 6620 return ERR_PTR(-ENOMEM);
6615 6621
6622 vmx->vcpu.arch.user_fpu = kmem_cache_zalloc(x86_fpu_cache,
6623 GFP_KERNEL_ACCOUNT);
6624 if (!vmx->vcpu.arch.user_fpu) {
6625 printk(KERN_ERR "kvm: failed to allocate kvm userspace's fpu\n");
6626 err = -ENOMEM;
6627 goto free_partial_vcpu;
6628 }
6629
6616 vmx->vcpu.arch.guest_fpu = kmem_cache_zalloc(x86_fpu_cache, 6630 vmx->vcpu.arch.guest_fpu = kmem_cache_zalloc(x86_fpu_cache,
6617 GFP_KERNEL_ACCOUNT); 6631 GFP_KERNEL_ACCOUNT);
6618 if (!vmx->vcpu.arch.guest_fpu) { 6632 if (!vmx->vcpu.arch.guest_fpu) {
6619 printk(KERN_ERR "kvm: failed to allocate vcpu's fpu\n"); 6633 printk(KERN_ERR "kvm: failed to allocate vcpu's fpu\n");
6620 err = -ENOMEM; 6634 err = -ENOMEM;
6621 goto free_partial_vcpu; 6635 goto free_user_fpu;
6622 } 6636 }
6623 6637
6624 vmx->vpid = allocate_vpid(); 6638 vmx->vpid = allocate_vpid();
@@ -6721,6 +6735,8 @@ uninit_vcpu:
6721free_vcpu: 6735free_vcpu:
6722 free_vpid(vmx->vpid); 6736 free_vpid(vmx->vpid);
6723 kmem_cache_free(x86_fpu_cache, vmx->vcpu.arch.guest_fpu); 6737 kmem_cache_free(x86_fpu_cache, vmx->vcpu.arch.guest_fpu);
6738free_user_fpu:
6739 kmem_cache_free(x86_fpu_cache, vmx->vcpu.arch.user_fpu);
6724free_partial_vcpu: 6740free_partial_vcpu:
6725 kmem_cache_free(kvm_vcpu_cache, vmx); 6741 kmem_cache_free(kvm_vcpu_cache, vmx);
6726 return ERR_PTR(err); 6742 return ERR_PTR(err);
@@ -7715,6 +7731,7 @@ static struct kvm_x86_ops vmx_x86_ops __ro_after_init = {
7715 .guest_apic_has_interrupt = vmx_guest_apic_has_interrupt, 7731 .guest_apic_has_interrupt = vmx_guest_apic_has_interrupt,
7716 .sync_pir_to_irr = vmx_sync_pir_to_irr, 7732 .sync_pir_to_irr = vmx_sync_pir_to_irr,
7717 .deliver_posted_interrupt = vmx_deliver_posted_interrupt, 7733 .deliver_posted_interrupt = vmx_deliver_posted_interrupt,
7734 .dy_apicv_has_pending_interrupt = vmx_dy_apicv_has_pending_interrupt,
7718 7735
7719 .set_tss_addr = vmx_set_tss_addr, 7736 .set_tss_addr = vmx_set_tss_addr,
7720 .set_identity_map_addr = vmx_set_identity_map_addr, 7737 .set_identity_map_addr = vmx_set_identity_map_addr,
@@ -7780,6 +7797,7 @@ static struct kvm_x86_ops vmx_x86_ops __ro_after_init = {
7780 .set_nested_state = NULL, 7797 .set_nested_state = NULL,
7781 .get_vmcs12_pages = NULL, 7798 .get_vmcs12_pages = NULL,
7782 .nested_enable_evmcs = NULL, 7799 .nested_enable_evmcs = NULL,
7800 .nested_get_evmcs_version = NULL,
7783 .need_emulation_on_page_fault = vmx_need_emulation_on_page_fault, 7801 .need_emulation_on_page_fault = vmx_need_emulation_on_page_fault,
7784}; 7802};
7785 7803
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 58305cf81182..290c3c3efb87 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -3306,6 +3306,10 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
3306 3306
3307 kvm_x86_ops->vcpu_load(vcpu, cpu); 3307 kvm_x86_ops->vcpu_load(vcpu, cpu);
3308 3308
3309 fpregs_assert_state_consistent();
3310 if (test_thread_flag(TIF_NEED_FPU_LOAD))
3311 switch_fpu_return();
3312
3309 /* Apply any externally detected TSC adjustments (due to suspend) */ 3313 /* Apply any externally detected TSC adjustments (due to suspend) */
3310 if (unlikely(vcpu->arch.tsc_offset_adjustment)) { 3314 if (unlikely(vcpu->arch.tsc_offset_adjustment)) {
3311 adjust_tsc_offset_host(vcpu, vcpu->arch.tsc_offset_adjustment); 3315 adjust_tsc_offset_host(vcpu, vcpu->arch.tsc_offset_adjustment);
@@ -6590,12 +6594,13 @@ restart:
6590 unsigned long rflags = kvm_x86_ops->get_rflags(vcpu); 6594 unsigned long rflags = kvm_x86_ops->get_rflags(vcpu);
6591 toggle_interruptibility(vcpu, ctxt->interruptibility); 6595 toggle_interruptibility(vcpu, ctxt->interruptibility);
6592 vcpu->arch.emulate_regs_need_sync_to_vcpu = false; 6596 vcpu->arch.emulate_regs_need_sync_to_vcpu = false;
6593 kvm_rip_write(vcpu, ctxt->eip);
6594 if (r == EMULATE_DONE && ctxt->tf)
6595 kvm_vcpu_do_singlestep(vcpu, &r);
6596 if (!ctxt->have_exception || 6597 if (!ctxt->have_exception ||
6597 exception_type(ctxt->exception.vector) == EXCPT_TRAP) 6598 exception_type(ctxt->exception.vector) == EXCPT_TRAP) {
6599 kvm_rip_write(vcpu, ctxt->eip);
6600 if (r == EMULATE_DONE && ctxt->tf)
6601 kvm_vcpu_do_singlestep(vcpu, &r);
6598 __kvm_set_rflags(vcpu, ctxt->eflags); 6602 __kvm_set_rflags(vcpu, ctxt->eflags);
6603 }
6599 6604
6600 /* 6605 /*
6601 * For STI, interrupts are shadowed; so KVM_REQ_EVENT will 6606 * For STI, interrupts are shadowed; so KVM_REQ_EVENT will
@@ -7202,7 +7207,7 @@ static void kvm_sched_yield(struct kvm *kvm, unsigned long dest_id)
7202 7207
7203 rcu_read_unlock(); 7208 rcu_read_unlock();
7204 7209
7205 if (target) 7210 if (target && READ_ONCE(target->ready))
7206 kvm_vcpu_yield_to(target); 7211 kvm_vcpu_yield_to(target);
7207} 7212}
7208 7213
@@ -7242,6 +7247,7 @@ int kvm_emulate_hypercall(struct kvm_vcpu *vcpu)
7242 break; 7247 break;
7243 case KVM_HC_KICK_CPU: 7248 case KVM_HC_KICK_CPU:
7244 kvm_pv_kick_cpu_op(vcpu->kvm, a0, a1); 7249 kvm_pv_kick_cpu_op(vcpu->kvm, a0, a1);
7250 kvm_sched_yield(vcpu->kvm, a1);
7245 ret = 0; 7251 ret = 0;
7246 break; 7252 break;
7247#ifdef CONFIG_X86_64 7253#ifdef CONFIG_X86_64
@@ -7990,9 +7996,8 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
7990 trace_kvm_entry(vcpu->vcpu_id); 7996 trace_kvm_entry(vcpu->vcpu_id);
7991 guest_enter_irqoff(); 7997 guest_enter_irqoff();
7992 7998
7993 fpregs_assert_state_consistent(); 7999 /* The preempt notifier should have taken care of the FPU already. */
7994 if (test_thread_flag(TIF_NEED_FPU_LOAD)) 8000 WARN_ON_ONCE(test_thread_flag(TIF_NEED_FPU_LOAD));
7995 switch_fpu_return();
7996 8001
7997 if (unlikely(vcpu->arch.switch_db_regs)) { 8002 if (unlikely(vcpu->arch.switch_db_regs)) {
7998 set_debugreg(0, 7); 8003 set_debugreg(0, 7);
@@ -8270,7 +8275,7 @@ static void kvm_load_guest_fpu(struct kvm_vcpu *vcpu)
8270{ 8275{
8271 fpregs_lock(); 8276 fpregs_lock();
8272 8277
8273 copy_fpregs_to_fpstate(&current->thread.fpu); 8278 copy_fpregs_to_fpstate(vcpu->arch.user_fpu);
8274 /* PKRU is separately restored in kvm_x86_ops->run. */ 8279 /* PKRU is separately restored in kvm_x86_ops->run. */
8275 __copy_kernel_to_fpregs(&vcpu->arch.guest_fpu->state, 8280 __copy_kernel_to_fpregs(&vcpu->arch.guest_fpu->state,
8276 ~XFEATURE_MASK_PKRU); 8281 ~XFEATURE_MASK_PKRU);
@@ -8287,7 +8292,7 @@ static void kvm_put_guest_fpu(struct kvm_vcpu *vcpu)
8287 fpregs_lock(); 8292 fpregs_lock();
8288 8293
8289 copy_fpregs_to_fpstate(vcpu->arch.guest_fpu); 8294 copy_fpregs_to_fpstate(vcpu->arch.guest_fpu);
8290 copy_kernel_to_fpregs(&current->thread.fpu.state); 8295 copy_kernel_to_fpregs(&vcpu->arch.user_fpu->state);
8291 8296
8292 fpregs_mark_activate(); 8297 fpregs_mark_activate();
8293 fpregs_unlock(); 8298 fpregs_unlock();
@@ -9694,6 +9699,22 @@ int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
9694 return kvm_vcpu_running(vcpu) || kvm_vcpu_has_events(vcpu); 9699 return kvm_vcpu_running(vcpu) || kvm_vcpu_has_events(vcpu);
9695} 9700}
9696 9701
9702bool kvm_arch_dy_runnable(struct kvm_vcpu *vcpu)
9703{
9704 if (READ_ONCE(vcpu->arch.pv.pv_unhalted))
9705 return true;
9706
9707 if (kvm_test_request(KVM_REQ_NMI, vcpu) ||
9708 kvm_test_request(KVM_REQ_SMI, vcpu) ||
9709 kvm_test_request(KVM_REQ_EVENT, vcpu))
9710 return true;
9711
9712 if (vcpu->arch.apicv_active && kvm_x86_ops->dy_apicv_has_pending_interrupt(vcpu))
9713 return true;
9714
9715 return false;
9716}
9717
9697bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu) 9718bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu)
9698{ 9719{
9699 return vcpu->arch.preempted_in_kernel; 9720 return vcpu->arch.preempted_in_kernel;
diff --git a/arch/x86/lib/cpu.c b/arch/x86/lib/cpu.c
index 04967cdce5d1..7ad68917a51e 100644
--- a/arch/x86/lib/cpu.c
+++ b/arch/x86/lib/cpu.c
@@ -1,6 +1,7 @@
1// SPDX-License-Identifier: GPL-2.0-only 1// SPDX-License-Identifier: GPL-2.0-only
2#include <linux/types.h> 2#include <linux/types.h>
3#include <linux/export.h> 3#include <linux/export.h>
4#include <asm/cpu.h>
4 5
5unsigned int x86_family(unsigned int sig) 6unsigned int x86_family(unsigned int sig)
6{ 7{
diff --git a/arch/x86/math-emu/errors.c b/arch/x86/math-emu/errors.c
index 6b468517ab71..73dc66d887f3 100644
--- a/arch/x86/math-emu/errors.c
+++ b/arch/x86/math-emu/errors.c
@@ -178,13 +178,15 @@ void FPU_printall(void)
178 for (i = 0; i < 8; i++) { 178 for (i = 0; i < 8; i++) {
179 FPU_REG *r = &st(i); 179 FPU_REG *r = &st(i);
180 u_char tagi = FPU_gettagi(i); 180 u_char tagi = FPU_gettagi(i);
181
181 switch (tagi) { 182 switch (tagi) {
182 case TAG_Empty: 183 case TAG_Empty:
183 continue; 184 continue;
184 break;
185 case TAG_Zero: 185 case TAG_Zero:
186 case TAG_Special: 186 case TAG_Special:
187 /* Update tagi for the printk below */
187 tagi = FPU_Special(r); 188 tagi = FPU_Special(r);
189 /* fall through */
188 case TAG_Valid: 190 case TAG_Valid:
189 printk("st(%d) %c .%04lx %04lx %04lx %04lx e%+-6d ", i, 191 printk("st(%d) %c .%04lx %04lx %04lx %04lx e%+-6d ", i,
190 getsign(r) ? '-' : '+', 192 getsign(r) ? '-' : '+',
@@ -198,7 +200,6 @@ void FPU_printall(void)
198 printk("Whoops! Error in errors.c: tag%d is %d ", i, 200 printk("Whoops! Error in errors.c: tag%d is %d ", i,
199 tagi); 201 tagi);
200 continue; 202 continue;
201 break;
202 } 203 }
203 printk("%s\n", tag_desc[(int)(unsigned)tagi]); 204 printk("%s\n", tag_desc[(int)(unsigned)tagi]);
204 } 205 }
diff --git a/arch/x86/math-emu/fpu_trig.c b/arch/x86/math-emu/fpu_trig.c
index 783c509f957a..127ea54122d7 100644
--- a/arch/x86/math-emu/fpu_trig.c
+++ b/arch/x86/math-emu/fpu_trig.c
@@ -1352,7 +1352,7 @@ static void fyl2xp1(FPU_REG *st0_ptr, u_char st0_tag)
1352 case TW_Denormal: 1352 case TW_Denormal:
1353 if (denormal_operand() < 0) 1353 if (denormal_operand() < 0)
1354 return; 1354 return;
1355 1355 /* fall through */
1356 case TAG_Zero: 1356 case TAG_Zero:
1357 case TAG_Valid: 1357 case TAG_Valid:
1358 setsign(st0_ptr, getsign(st0_ptr) ^ getsign(st1_ptr)); 1358 setsign(st0_ptr, getsign(st0_ptr) ^ getsign(st1_ptr));
diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
index 6c46095cd0d9..9ceacd1156db 100644
--- a/arch/x86/mm/fault.c
+++ b/arch/x86/mm/fault.c
@@ -177,13 +177,14 @@ static inline pmd_t *vmalloc_sync_one(pgd_t *pgd, unsigned long address)
177 177
178 pmd = pmd_offset(pud, address); 178 pmd = pmd_offset(pud, address);
179 pmd_k = pmd_offset(pud_k, address); 179 pmd_k = pmd_offset(pud_k, address);
180 if (!pmd_present(*pmd_k))
181 return NULL;
182 180
183 if (!pmd_present(*pmd)) 181 if (pmd_present(*pmd) != pmd_present(*pmd_k))
184 set_pmd(pmd, *pmd_k); 182 set_pmd(pmd, *pmd_k);
183
184 if (!pmd_present(*pmd_k))
185 return NULL;
185 else 186 else
186 BUG_ON(pmd_page(*pmd) != pmd_page(*pmd_k)); 187 BUG_ON(pmd_pfn(*pmd) != pmd_pfn(*pmd_k));
187 188
188 return pmd_k; 189 return pmd_k;
189} 190}
@@ -203,17 +204,13 @@ void vmalloc_sync_all(void)
203 spin_lock(&pgd_lock); 204 spin_lock(&pgd_lock);
204 list_for_each_entry(page, &pgd_list, lru) { 205 list_for_each_entry(page, &pgd_list, lru) {
205 spinlock_t *pgt_lock; 206 spinlock_t *pgt_lock;
206 pmd_t *ret;
207 207
208 /* the pgt_lock only for Xen */ 208 /* the pgt_lock only for Xen */
209 pgt_lock = &pgd_page_get_mm(page)->page_table_lock; 209 pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
210 210
211 spin_lock(pgt_lock); 211 spin_lock(pgt_lock);
212 ret = vmalloc_sync_one(page_address(page), address); 212 vmalloc_sync_one(page_address(page), address);
213 spin_unlock(pgt_lock); 213 spin_unlock(pgt_lock);
214
215 if (!ret)
216 break;
217 } 214 }
218 spin_unlock(&pgd_lock); 215 spin_unlock(&pgd_lock);
219 } 216 }
diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
index 6a9a77a403c9..e14e95ea7338 100644
--- a/arch/x86/mm/pageattr.c
+++ b/arch/x86/mm/pageattr.c
@@ -516,7 +516,7 @@ static inline void check_conflict(int warnlvl, pgprot_t prot, pgprotval_t val,
516 */ 516 */
517static inline pgprot_t static_protections(pgprot_t prot, unsigned long start, 517static inline pgprot_t static_protections(pgprot_t prot, unsigned long start,
518 unsigned long pfn, unsigned long npg, 518 unsigned long pfn, unsigned long npg,
519 int warnlvl) 519 unsigned long lpsize, int warnlvl)
520{ 520{
521 pgprotval_t forbidden, res; 521 pgprotval_t forbidden, res;
522 unsigned long end; 522 unsigned long end;
@@ -535,9 +535,17 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long start,
535 check_conflict(warnlvl, prot, res, start, end, pfn, "Text NX"); 535 check_conflict(warnlvl, prot, res, start, end, pfn, "Text NX");
536 forbidden = res; 536 forbidden = res;
537 537
538 res = protect_kernel_text_ro(start, end); 538 /*
539 check_conflict(warnlvl, prot, res, start, end, pfn, "Text RO"); 539 * Special case to preserve a large page. If the change spawns the
540 forbidden |= res; 540 * full large page mapping then there is no point to split it
541 * up. Happens with ftrace and is going to be removed once ftrace
542 * switched to text_poke().
543 */
544 if (lpsize != (npg * PAGE_SIZE) || (start & (lpsize - 1))) {
545 res = protect_kernel_text_ro(start, end);
546 check_conflict(warnlvl, prot, res, start, end, pfn, "Text RO");
547 forbidden |= res;
548 }
541 549
542 /* Check the PFN directly */ 550 /* Check the PFN directly */
543 res = protect_pci_bios(pfn, pfn + npg - 1); 551 res = protect_pci_bios(pfn, pfn + npg - 1);
@@ -819,7 +827,7 @@ static int __should_split_large_page(pte_t *kpte, unsigned long address,
819 * extra conditional required here. 827 * extra conditional required here.
820 */ 828 */
821 chk_prot = static_protections(old_prot, lpaddr, old_pfn, numpages, 829 chk_prot = static_protections(old_prot, lpaddr, old_pfn, numpages,
822 CPA_CONFLICT); 830 psize, CPA_CONFLICT);
823 831
824 if (WARN_ON_ONCE(pgprot_val(chk_prot) != pgprot_val(old_prot))) { 832 if (WARN_ON_ONCE(pgprot_val(chk_prot) != pgprot_val(old_prot))) {
825 /* 833 /*
@@ -855,7 +863,7 @@ static int __should_split_large_page(pte_t *kpte, unsigned long address,
855 * protection requirement in the large page. 863 * protection requirement in the large page.
856 */ 864 */
857 new_prot = static_protections(req_prot, lpaddr, old_pfn, numpages, 865 new_prot = static_protections(req_prot, lpaddr, old_pfn, numpages,
858 CPA_DETECT); 866 psize, CPA_DETECT);
859 867
860 /* 868 /*
861 * If there is a conflict, split the large page. 869 * If there is a conflict, split the large page.
@@ -906,7 +914,8 @@ static void split_set_pte(struct cpa_data *cpa, pte_t *pte, unsigned long pfn,
906 if (!cpa->force_static_prot) 914 if (!cpa->force_static_prot)
907 goto set; 915 goto set;
908 916
909 prot = static_protections(ref_prot, address, pfn, npg, CPA_PROTECT); 917 /* Hand in lpsize = 0 to enforce the protection mechanism */
918 prot = static_protections(ref_prot, address, pfn, npg, 0, CPA_PROTECT);
910 919
911 if (pgprot_val(prot) == pgprot_val(ref_prot)) 920 if (pgprot_val(prot) == pgprot_val(ref_prot))
912 goto set; 921 goto set;
@@ -1503,7 +1512,8 @@ repeat:
1503 pgprot_val(new_prot) |= pgprot_val(cpa->mask_set); 1512 pgprot_val(new_prot) |= pgprot_val(cpa->mask_set);
1504 1513
1505 cpa_inc_4k_install(); 1514 cpa_inc_4k_install();
1506 new_prot = static_protections(new_prot, address, pfn, 1, 1515 /* Hand in lpsize = 0 to enforce the protection mechanism */
1516 new_prot = static_protections(new_prot, address, pfn, 1, 0,
1507 CPA_PROTECT); 1517 CPA_PROTECT);
1508 1518
1509 new_prot = pgprot_clear_protnone_bits(new_prot); 1519 new_prot = pgprot_clear_protnone_bits(new_prot);
diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
index eaaed5bfc4a4..991549a1c5f3 100644
--- a/arch/x86/net/bpf_jit_comp.c
+++ b/arch/x86/net/bpf_jit_comp.c
@@ -390,8 +390,9 @@ static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image,
390 390
391 emit_prologue(&prog, bpf_prog->aux->stack_depth, 391 emit_prologue(&prog, bpf_prog->aux->stack_depth,
392 bpf_prog_was_classic(bpf_prog)); 392 bpf_prog_was_classic(bpf_prog));
393 addrs[0] = prog - temp;
393 394
394 for (i = 0; i < insn_cnt; i++, insn++) { 395 for (i = 1; i <= insn_cnt; i++, insn++) {
395 const s32 imm32 = insn->imm; 396 const s32 imm32 = insn->imm;
396 u32 dst_reg = insn->dst_reg; 397 u32 dst_reg = insn->dst_reg;
397 u32 src_reg = insn->src_reg; 398 u32 src_reg = insn->src_reg;
@@ -1105,7 +1106,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
1105 extra_pass = true; 1106 extra_pass = true;
1106 goto skip_init_addrs; 1107 goto skip_init_addrs;
1107 } 1108 }
1108 addrs = kmalloc_array(prog->len, sizeof(*addrs), GFP_KERNEL); 1109 addrs = kmalloc_array(prog->len + 1, sizeof(*addrs), GFP_KERNEL);
1109 if (!addrs) { 1110 if (!addrs) {
1110 prog = orig_prog; 1111 prog = orig_prog;
1111 goto out_addrs; 1112 goto out_addrs;
@@ -1115,7 +1116,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
1115 * Before first pass, make a rough estimation of addrs[] 1116 * Before first pass, make a rough estimation of addrs[]
1116 * each BPF instruction is translated to less than 64 bytes 1117 * each BPF instruction is translated to less than 64 bytes
1117 */ 1118 */
1118 for (proglen = 0, i = 0; i < prog->len; i++) { 1119 for (proglen = 0, i = 0; i <= prog->len; i++) {
1119 proglen += 64; 1120 proglen += 64;
1120 addrs[i] = proglen; 1121 addrs[i] = proglen;
1121 } 1122 }
@@ -1180,7 +1181,7 @@ out_image:
1180 1181
1181 if (!image || !prog->is_func || extra_pass) { 1182 if (!image || !prog->is_func || extra_pass) {
1182 if (image) 1183 if (image)
1183 bpf_prog_fill_jited_linfo(prog, addrs); 1184 bpf_prog_fill_jited_linfo(prog, addrs + 1);
1184out_addrs: 1185out_addrs:
1185 kfree(addrs); 1186 kfree(addrs);
1186 kfree(jit_data); 1187 kfree(jit_data);
diff --git a/arch/x86/power/cpu.c b/arch/x86/power/cpu.c
index 24b079e94bc2..c9ef6a7a4a1a 100644
--- a/arch/x86/power/cpu.c
+++ b/arch/x86/power/cpu.c
@@ -12,6 +12,7 @@
12#include <linux/smp.h> 12#include <linux/smp.h>
13#include <linux/perf_event.h> 13#include <linux/perf_event.h>
14#include <linux/tboot.h> 14#include <linux/tboot.h>
15#include <linux/dmi.h>
15 16
16#include <asm/pgtable.h> 17#include <asm/pgtable.h>
17#include <asm/proto.h> 18#include <asm/proto.h>
@@ -23,7 +24,7 @@
23#include <asm/debugreg.h> 24#include <asm/debugreg.h>
24#include <asm/cpu.h> 25#include <asm/cpu.h>
25#include <asm/mmu_context.h> 26#include <asm/mmu_context.h>
26#include <linux/dmi.h> 27#include <asm/cpu_device_id.h>
27 28
28#ifdef CONFIG_X86_32 29#ifdef CONFIG_X86_32
29__visible unsigned long saved_context_ebx; 30__visible unsigned long saved_context_ebx;
@@ -397,15 +398,14 @@ static int __init bsp_pm_check_init(void)
397 398
398core_initcall(bsp_pm_check_init); 399core_initcall(bsp_pm_check_init);
399 400
400static int msr_init_context(const u32 *msr_id, const int total_num) 401static int msr_build_context(const u32 *msr_id, const int num)
401{ 402{
402 int i = 0; 403 struct saved_msrs *saved_msrs = &saved_context.saved_msrs;
403 struct saved_msr *msr_array; 404 struct saved_msr *msr_array;
405 int total_num;
406 int i, j;
404 407
405 if (saved_context.saved_msrs.array || saved_context.saved_msrs.num > 0) { 408 total_num = saved_msrs->num + num;
406 pr_err("x86/pm: MSR quirk already applied, please check your DMI match table.\n");
407 return -EINVAL;
408 }
409 409
410 msr_array = kmalloc_array(total_num, sizeof(struct saved_msr), GFP_KERNEL); 410 msr_array = kmalloc_array(total_num, sizeof(struct saved_msr), GFP_KERNEL);
411 if (!msr_array) { 411 if (!msr_array) {
@@ -413,19 +413,30 @@ static int msr_init_context(const u32 *msr_id, const int total_num)
413 return -ENOMEM; 413 return -ENOMEM;
414 } 414 }
415 415
416 for (i = 0; i < total_num; i++) { 416 if (saved_msrs->array) {
417 msr_array[i].info.msr_no = msr_id[i]; 417 /*
418 * Multiple callbacks can invoke this function, so copy any
419 * MSR save requests from previous invocations.
420 */
421 memcpy(msr_array, saved_msrs->array,
422 sizeof(struct saved_msr) * saved_msrs->num);
423
424 kfree(saved_msrs->array);
425 }
426
427 for (i = saved_msrs->num, j = 0; i < total_num; i++, j++) {
428 msr_array[i].info.msr_no = msr_id[j];
418 msr_array[i].valid = false; 429 msr_array[i].valid = false;
419 msr_array[i].info.reg.q = 0; 430 msr_array[i].info.reg.q = 0;
420 } 431 }
421 saved_context.saved_msrs.num = total_num; 432 saved_msrs->num = total_num;
422 saved_context.saved_msrs.array = msr_array; 433 saved_msrs->array = msr_array;
423 434
424 return 0; 435 return 0;
425} 436}
426 437
427/* 438/*
428 * The following section is a quirk framework for problematic BIOSen: 439 * The following sections are a quirk framework for problematic BIOSen:
429 * Sometimes MSRs are modified by the BIOSen after suspended to 440 * Sometimes MSRs are modified by the BIOSen after suspended to
430 * RAM, this might cause unexpected behavior after wakeup. 441 * RAM, this might cause unexpected behavior after wakeup.
431 * Thus we save/restore these specified MSRs across suspend/resume 442 * Thus we save/restore these specified MSRs across suspend/resume
@@ -440,7 +451,7 @@ static int msr_initialize_bdw(const struct dmi_system_id *d)
440 u32 bdw_msr_id[] = { MSR_IA32_THERM_CONTROL }; 451 u32 bdw_msr_id[] = { MSR_IA32_THERM_CONTROL };
441 452
442 pr_info("x86/pm: %s detected, MSR saving is needed during suspending.\n", d->ident); 453 pr_info("x86/pm: %s detected, MSR saving is needed during suspending.\n", d->ident);
443 return msr_init_context(bdw_msr_id, ARRAY_SIZE(bdw_msr_id)); 454 return msr_build_context(bdw_msr_id, ARRAY_SIZE(bdw_msr_id));
444} 455}
445 456
446static const struct dmi_system_id msr_save_dmi_table[] = { 457static const struct dmi_system_id msr_save_dmi_table[] = {
@@ -455,9 +466,58 @@ static const struct dmi_system_id msr_save_dmi_table[] = {
455 {} 466 {}
456}; 467};
457 468
469static int msr_save_cpuid_features(const struct x86_cpu_id *c)
470{
471 u32 cpuid_msr_id[] = {
472 MSR_AMD64_CPUID_FN_1,
473 };
474
475 pr_info("x86/pm: family %#hx cpu detected, MSR saving is needed during suspending.\n",
476 c->family);
477
478 return msr_build_context(cpuid_msr_id, ARRAY_SIZE(cpuid_msr_id));
479}
480
481static const struct x86_cpu_id msr_save_cpu_table[] = {
482 {
483 .vendor = X86_VENDOR_AMD,
484 .family = 0x15,
485 .model = X86_MODEL_ANY,
486 .feature = X86_FEATURE_ANY,
487 .driver_data = (kernel_ulong_t)msr_save_cpuid_features,
488 },
489 {
490 .vendor = X86_VENDOR_AMD,
491 .family = 0x16,
492 .model = X86_MODEL_ANY,
493 .feature = X86_FEATURE_ANY,
494 .driver_data = (kernel_ulong_t)msr_save_cpuid_features,
495 },
496 {}
497};
498
499typedef int (*pm_cpu_match_t)(const struct x86_cpu_id *);
500static int pm_cpu_check(const struct x86_cpu_id *c)
501{
502 const struct x86_cpu_id *m;
503 int ret = 0;
504
505 m = x86_match_cpu(msr_save_cpu_table);
506 if (m) {
507 pm_cpu_match_t fn;
508
509 fn = (pm_cpu_match_t)m->driver_data;
510 ret = fn(m);
511 }
512
513 return ret;
514}
515
458static int pm_check_save_msr(void) 516static int pm_check_save_msr(void)
459{ 517{
460 dmi_check_system(msr_save_dmi_table); 518 dmi_check_system(msr_save_dmi_table);
519 pm_cpu_check(msr_save_cpu_table);
520
461 return 0; 521 return 0;
462} 522}
463 523
diff --git a/arch/x86/purgatory/Makefile b/arch/x86/purgatory/Makefile
index 3cf302b26332..8901a1f89cf5 100644
--- a/arch/x86/purgatory/Makefile
+++ b/arch/x86/purgatory/Makefile
@@ -6,6 +6,9 @@ purgatory-y := purgatory.o stack.o setup-x86_$(BITS).o sha256.o entry64.o string
6targets += $(purgatory-y) 6targets += $(purgatory-y)
7PURGATORY_OBJS = $(addprefix $(obj)/,$(purgatory-y)) 7PURGATORY_OBJS = $(addprefix $(obj)/,$(purgatory-y))
8 8
9$(obj)/string.o: $(srctree)/arch/x86/boot/compressed/string.c FORCE
10 $(call if_changed_rule,cc_o_c)
11
9$(obj)/sha256.o: $(srctree)/lib/sha256.c FORCE 12$(obj)/sha256.o: $(srctree)/lib/sha256.c FORCE
10 $(call if_changed_rule,cc_o_c) 13 $(call if_changed_rule,cc_o_c)
11 14
@@ -17,11 +20,34 @@ KCOV_INSTRUMENT := n
17 20
18# Default KBUILD_CFLAGS can have -pg option set when FTRACE is enabled. That 21# Default KBUILD_CFLAGS can have -pg option set when FTRACE is enabled. That
19# in turn leaves some undefined symbols like __fentry__ in purgatory and not 22# in turn leaves some undefined symbols like __fentry__ in purgatory and not
20# sure how to relocate those. Like kexec-tools, use custom flags. 23# sure how to relocate those.
21 24ifdef CONFIG_FUNCTION_TRACER
22KBUILD_CFLAGS := -fno-strict-aliasing -Wall -Wstrict-prototypes -fno-zero-initialized-in-bss -fno-builtin -ffreestanding -c -Os -mcmodel=large 25CFLAGS_REMOVE_sha256.o += $(CC_FLAGS_FTRACE)
23KBUILD_CFLAGS += -m$(BITS) 26CFLAGS_REMOVE_purgatory.o += $(CC_FLAGS_FTRACE)
24KBUILD_CFLAGS += $(call cc-option,-fno-PIE) 27CFLAGS_REMOVE_string.o += $(CC_FLAGS_FTRACE)
28CFLAGS_REMOVE_kexec-purgatory.o += $(CC_FLAGS_FTRACE)
29endif
30
31ifdef CONFIG_STACKPROTECTOR
32CFLAGS_REMOVE_sha256.o += -fstack-protector
33CFLAGS_REMOVE_purgatory.o += -fstack-protector
34CFLAGS_REMOVE_string.o += -fstack-protector
35CFLAGS_REMOVE_kexec-purgatory.o += -fstack-protector
36endif
37
38ifdef CONFIG_STACKPROTECTOR_STRONG
39CFLAGS_REMOVE_sha256.o += -fstack-protector-strong
40CFLAGS_REMOVE_purgatory.o += -fstack-protector-strong
41CFLAGS_REMOVE_string.o += -fstack-protector-strong
42CFLAGS_REMOVE_kexec-purgatory.o += -fstack-protector-strong
43endif
44
45ifdef CONFIG_RETPOLINE
46CFLAGS_REMOVE_sha256.o += $(RETPOLINE_CFLAGS)
47CFLAGS_REMOVE_purgatory.o += $(RETPOLINE_CFLAGS)
48CFLAGS_REMOVE_string.o += $(RETPOLINE_CFLAGS)
49CFLAGS_REMOVE_kexec-purgatory.o += $(RETPOLINE_CFLAGS)
50endif
25 51
26$(obj)/purgatory.ro: $(PURGATORY_OBJS) FORCE 52$(obj)/purgatory.ro: $(PURGATORY_OBJS) FORCE
27 $(call if_changed,ld) 53 $(call if_changed,ld)
diff --git a/arch/x86/purgatory/purgatory.c b/arch/x86/purgatory/purgatory.c
index 6d8d5a34c377..b607bda786f6 100644
--- a/arch/x86/purgatory/purgatory.c
+++ b/arch/x86/purgatory/purgatory.c
@@ -68,3 +68,9 @@ void purgatory(void)
68 } 68 }
69 copy_backup_region(); 69 copy_backup_region();
70} 70}
71
72/*
73 * Defined in order to reuse memcpy() and memset() from
74 * arch/x86/boot/compressed/string.c
75 */
76void warn(const char *msg) {}
diff --git a/arch/x86/purgatory/string.c b/arch/x86/purgatory/string.c
deleted file mode 100644
index 01ad43873ad9..000000000000
--- a/arch/x86/purgatory/string.c
+++ /dev/null
@@ -1,23 +0,0 @@
1// SPDX-License-Identifier: GPL-2.0-only
2/*
3 * Simple string functions.
4 *
5 * Copyright (C) 2014 Red Hat Inc.
6 *
7 * Author:
8 * Vivek Goyal <vgoyal@redhat.com>
9 */
10
11#include <linux/types.h>
12
13#include "../boot/string.c"
14
15void *memcpy(void *dst, const void *src, size_t len)
16{
17 return __builtin_memcpy(dst, src, len);
18}
19
20void *memset(void *dst, int c, size_t len)
21{
22 return __builtin_memset(dst, c, len);
23}
diff --git a/arch/xtensa/kernel/coprocessor.S b/arch/xtensa/kernel/coprocessor.S
index 60c220020054..80828b95a51f 100644
--- a/arch/xtensa/kernel/coprocessor.S
+++ b/arch/xtensa/kernel/coprocessor.S
@@ -14,6 +14,7 @@
14 14
15#include <linux/linkage.h> 15#include <linux/linkage.h>
16#include <asm/asm-offsets.h> 16#include <asm/asm-offsets.h>
17#include <asm/asmmacro.h>
17#include <asm/processor.h> 18#include <asm/processor.h>
18#include <asm/coprocessor.h> 19#include <asm/coprocessor.h>
19#include <asm/thread_info.h> 20#include <asm/thread_info.h>
diff --git a/arch/xtensa/kernel/setup.c b/arch/xtensa/kernel/setup.c
index 5cb8a62e091c..7c3106093c75 100644
--- a/arch/xtensa/kernel/setup.c
+++ b/arch/xtensa/kernel/setup.c
@@ -511,6 +511,7 @@ void cpu_reset(void)
511 "add %2, %2, %7\n\t" 511 "add %2, %2, %7\n\t"
512 "addi %0, %0, -1\n\t" 512 "addi %0, %0, -1\n\t"
513 "bnez %0, 1b\n\t" 513 "bnez %0, 1b\n\t"
514 "isync\n\t"
514 /* Jump to identity mapping */ 515 /* Jump to identity mapping */
515 "jx %3\n" 516 "jx %3\n"
516 "2:\n\t" 517 "2:\n\t"
diff --git a/block/bfq-iosched.c b/block/bfq-iosched.c
index 72860325245a..b33be928d164 100644
--- a/block/bfq-iosched.c
+++ b/block/bfq-iosched.c
@@ -1924,12 +1924,13 @@ static void bfq_add_request(struct request *rq)
1924 * confirmed no later than during the next 1924 * confirmed no later than during the next
1925 * I/O-plugging interval for bfqq. 1925 * I/O-plugging interval for bfqq.
1926 */ 1926 */
1927 if (!bfq_bfqq_has_short_ttime(bfqq) && 1927 if (bfqd->last_completed_rq_bfqq &&
1928 !bfq_bfqq_has_short_ttime(bfqq) &&
1928 ktime_get_ns() - bfqd->last_completion < 1929 ktime_get_ns() - bfqd->last_completion <
1929 200 * NSEC_PER_USEC) { 1930 200 * NSEC_PER_USEC) {
1930 if (bfqd->last_completed_rq_bfqq != bfqq && 1931 if (bfqd->last_completed_rq_bfqq != bfqq &&
1931 bfqd->last_completed_rq_bfqq != 1932 bfqd->last_completed_rq_bfqq !=
1932 bfqq->waker_bfqq) { 1933 bfqq->waker_bfqq) {
1933 /* 1934 /*
1934 * First synchronization detected with 1935 * First synchronization detected with
1935 * a candidate waker queue, or with a 1936 * a candidate waker queue, or with a
@@ -2250,9 +2251,14 @@ static void bfq_request_merged(struct request_queue *q, struct request *req,
2250 blk_rq_pos(container_of(rb_prev(&req->rb_node), 2251 blk_rq_pos(container_of(rb_prev(&req->rb_node),
2251 struct request, rb_node))) { 2252 struct request, rb_node))) {
2252 struct bfq_queue *bfqq = bfq_init_rq(req); 2253 struct bfq_queue *bfqq = bfq_init_rq(req);
2253 struct bfq_data *bfqd = bfqq->bfqd; 2254 struct bfq_data *bfqd;
2254 struct request *prev, *next_rq; 2255 struct request *prev, *next_rq;
2255 2256
2257 if (!bfqq)
2258 return;
2259
2260 bfqd = bfqq->bfqd;
2261
2256 /* Reposition request in its sort_list */ 2262 /* Reposition request in its sort_list */
2257 elv_rb_del(&bfqq->sort_list, req); 2263 elv_rb_del(&bfqq->sort_list, req);
2258 elv_rb_add(&bfqq->sort_list, req); 2264 elv_rb_add(&bfqq->sort_list, req);
@@ -2299,6 +2305,9 @@ static void bfq_requests_merged(struct request_queue *q, struct request *rq,
2299 struct bfq_queue *bfqq = bfq_init_rq(rq), 2305 struct bfq_queue *bfqq = bfq_init_rq(rq),
2300 *next_bfqq = bfq_init_rq(next); 2306 *next_bfqq = bfq_init_rq(next);
2301 2307
2308 if (!bfqq)
2309 return;
2310
2302 /* 2311 /*
2303 * If next and rq belong to the same bfq_queue and next is older 2312 * If next and rq belong to the same bfq_queue and next is older
2304 * than rq, then reposition rq in the fifo (by substituting next 2313 * than rq, then reposition rq in the fifo (by substituting next
@@ -3354,38 +3363,57 @@ static void bfq_dispatch_remove(struct request_queue *q, struct request *rq)
3354 * there is no active group, then the primary expectation for 3363 * there is no active group, then the primary expectation for
3355 * this device is probably a high throughput. 3364 * this device is probably a high throughput.
3356 * 3365 *
3357 * We are now left only with explaining the additional 3366 * We are now left only with explaining the two sub-conditions in the
3358 * compound condition that is checked below for deciding 3367 * additional compound condition that is checked below for deciding
3359 * whether the scenario is asymmetric. To explain this 3368 * whether the scenario is asymmetric. To explain the first
3360 * compound condition, we need to add that the function 3369 * sub-condition, we need to add that the function
3361 * bfq_asymmetric_scenario checks the weights of only 3370 * bfq_asymmetric_scenario checks the weights of only
3362 * non-weight-raised queues, for efficiency reasons (see 3371 * non-weight-raised queues, for efficiency reasons (see comments on
3363 * comments on bfq_weights_tree_add()). Then the fact that 3372 * bfq_weights_tree_add()). Then the fact that bfqq is weight-raised
3364 * bfqq is weight-raised is checked explicitly here. More 3373 * is checked explicitly here. More precisely, the compound condition
3365 * precisely, the compound condition below takes into account 3374 * below takes into account also the fact that, even if bfqq is being
3366 * also the fact that, even if bfqq is being weight-raised, 3375 * weight-raised, the scenario is still symmetric if all queues with
3367 * the scenario is still symmetric if all queues with requests 3376 * requests waiting for completion happen to be
3368 * waiting for completion happen to be 3377 * weight-raised. Actually, we should be even more precise here, and
3369 * weight-raised. Actually, we should be even more precise 3378 * differentiate between interactive weight raising and soft real-time
3370 * here, and differentiate between interactive weight raising 3379 * weight raising.
3371 * and soft real-time weight raising. 3380 *
3381 * The second sub-condition checked in the compound condition is
3382 * whether there is a fair amount of already in-flight I/O not
3383 * belonging to bfqq. If so, I/O dispatching is to be plugged, for the
3384 * following reason. The drive may decide to serve in-flight
3385 * non-bfqq's I/O requests before bfqq's ones, thereby delaying the
3386 * arrival of new I/O requests for bfqq (recall that bfqq is sync). If
3387 * I/O-dispatching is not plugged, then, while bfqq remains empty, a
3388 * basically uncontrolled amount of I/O from other queues may be
3389 * dispatched too, possibly causing the service of bfqq's I/O to be
3390 * delayed even longer in the drive. This problem gets more and more
3391 * serious as the speed and the queue depth of the drive grow,
3392 * because, as these two quantities grow, the probability to find no
3393 * queue busy but many requests in flight grows too. By contrast,
3394 * plugging I/O dispatching minimizes the delay induced by already
3395 * in-flight I/O, and enables bfqq to recover the bandwidth it may
3396 * lose because of this delay.
3372 * 3397 *
3373 * As a side note, it is worth considering that the above 3398 * As a side note, it is worth considering that the above
3374 * device-idling countermeasures may however fail in the 3399 * device-idling countermeasures may however fail in the following
3375 * following unlucky scenario: if idling is (correctly) 3400 * unlucky scenario: if I/O-dispatch plugging is (correctly) disabled
3376 * disabled in a time period during which all symmetry 3401 * in a time period during which all symmetry sub-conditions hold, and
3377 * sub-conditions hold, and hence the device is allowed to 3402 * therefore the device is allowed to enqueue many requests, but at
3378 * enqueue many requests, but at some later point in time some 3403 * some later point in time some sub-condition stops to hold, then it
3379 * sub-condition stops to hold, then it may become impossible 3404 * may become impossible to make requests be served in the desired
3380 * to let requests be served in the desired order until all 3405 * order until all the requests already queued in the device have been
3381 * the requests already queued in the device have been served. 3406 * served. The last sub-condition commented above somewhat mitigates
3407 * this problem for weight-raised queues.
3382 */ 3408 */
3383static bool idling_needed_for_service_guarantees(struct bfq_data *bfqd, 3409static bool idling_needed_for_service_guarantees(struct bfq_data *bfqd,
3384 struct bfq_queue *bfqq) 3410 struct bfq_queue *bfqq)
3385{ 3411{
3386 return (bfqq->wr_coeff > 1 && 3412 return (bfqq->wr_coeff > 1 &&
3387 bfqd->wr_busy_queues < 3413 (bfqd->wr_busy_queues <
3388 bfq_tot_busy_queues(bfqd)) || 3414 bfq_tot_busy_queues(bfqd) ||
3415 bfqd->rq_in_driver >=
3416 bfqq->dispatched + 4)) ||
3389 bfq_asymmetric_scenario(bfqd, bfqq); 3417 bfq_asymmetric_scenario(bfqd, bfqq);
3390} 3418}
3391 3419
@@ -4745,6 +4773,8 @@ static struct request *bfq_dispatch_request(struct blk_mq_hw_ctx *hctx)
4745 */ 4773 */
4746void bfq_put_queue(struct bfq_queue *bfqq) 4774void bfq_put_queue(struct bfq_queue *bfqq)
4747{ 4775{
4776 struct bfq_queue *item;
4777 struct hlist_node *n;
4748#ifdef CONFIG_BFQ_GROUP_IOSCHED 4778#ifdef CONFIG_BFQ_GROUP_IOSCHED
4749 struct bfq_group *bfqg = bfqq_group(bfqq); 4779 struct bfq_group *bfqg = bfqq_group(bfqq);
4750#endif 4780#endif
@@ -4789,6 +4819,36 @@ void bfq_put_queue(struct bfq_queue *bfqq)
4789 bfqq->bfqd->burst_size--; 4819 bfqq->bfqd->burst_size--;
4790 } 4820 }
4791 4821
4822 /*
4823 * bfqq does not exist any longer, so it cannot be woken by
4824 * any other queue, and cannot wake any other queue. Then bfqq
4825 * must be removed from the woken list of its possible waker
4826 * queue, and all queues in the woken list of bfqq must stop
4827 * having a waker queue. Strictly speaking, these updates
4828 * should be performed when bfqq remains with no I/O source
4829 * attached to it, which happens before bfqq gets freed. In
4830 * particular, this happens when the last process associated
4831 * with bfqq exits or gets associated with a different
4832 * queue. However, both events lead to bfqq being freed soon,
4833 * and dangling references would come out only after bfqq gets
4834 * freed. So these updates are done here, as a simple and safe
4835 * way to handle all cases.
4836 */
4837 /* remove bfqq from woken list */
4838 if (!hlist_unhashed(&bfqq->woken_list_node))
4839 hlist_del_init(&bfqq->woken_list_node);
4840
4841 /* reset waker for all queues in woken list */
4842 hlist_for_each_entry_safe(item, n, &bfqq->woken_list,
4843 woken_list_node) {
4844 item->waker_bfqq = NULL;
4845 bfq_clear_bfqq_has_waker(item);
4846 hlist_del_init(&item->woken_list_node);
4847 }
4848
4849 if (bfqq->bfqd && bfqq->bfqd->last_completed_rq_bfqq == bfqq)
4850 bfqq->bfqd->last_completed_rq_bfqq = NULL;
4851
4792 kmem_cache_free(bfq_pool, bfqq); 4852 kmem_cache_free(bfq_pool, bfqq);
4793#ifdef CONFIG_BFQ_GROUP_IOSCHED 4853#ifdef CONFIG_BFQ_GROUP_IOSCHED
4794 bfqg_and_blkg_put(bfqg); 4854 bfqg_and_blkg_put(bfqg);
@@ -4816,9 +4876,6 @@ static void bfq_put_cooperator(struct bfq_queue *bfqq)
4816 4876
4817static void bfq_exit_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq) 4877static void bfq_exit_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq)
4818{ 4878{
4819 struct bfq_queue *item;
4820 struct hlist_node *n;
4821
4822 if (bfqq == bfqd->in_service_queue) { 4879 if (bfqq == bfqd->in_service_queue) {
4823 __bfq_bfqq_expire(bfqd, bfqq, BFQQE_BUDGET_TIMEOUT); 4880 __bfq_bfqq_expire(bfqd, bfqq, BFQQE_BUDGET_TIMEOUT);
4824 bfq_schedule_dispatch(bfqd); 4881 bfq_schedule_dispatch(bfqd);
@@ -4828,18 +4885,6 @@ static void bfq_exit_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq)
4828 4885
4829 bfq_put_cooperator(bfqq); 4886 bfq_put_cooperator(bfqq);
4830 4887
4831 /* remove bfqq from woken list */
4832 if (!hlist_unhashed(&bfqq->woken_list_node))
4833 hlist_del_init(&bfqq->woken_list_node);
4834
4835 /* reset waker for all queues in woken list */
4836 hlist_for_each_entry_safe(item, n, &bfqq->woken_list,
4837 woken_list_node) {
4838 item->waker_bfqq = NULL;
4839 bfq_clear_bfqq_has_waker(item);
4840 hlist_del_init(&item->woken_list_node);
4841 }
4842
4843 bfq_put_queue(bfqq); /* release process reference */ 4888 bfq_put_queue(bfqq); /* release process reference */
4844} 4889}
4845 4890
@@ -5417,12 +5462,12 @@ static void bfq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
5417 5462
5418 spin_lock_irq(&bfqd->lock); 5463 spin_lock_irq(&bfqd->lock);
5419 bfqq = bfq_init_rq(rq); 5464 bfqq = bfq_init_rq(rq);
5420 if (at_head || blk_rq_is_passthrough(rq)) { 5465 if (!bfqq || at_head || blk_rq_is_passthrough(rq)) {
5421 if (at_head) 5466 if (at_head)
5422 list_add(&rq->queuelist, &bfqd->dispatch); 5467 list_add(&rq->queuelist, &bfqd->dispatch);
5423 else 5468 else
5424 list_add_tail(&rq->queuelist, &bfqd->dispatch); 5469 list_add_tail(&rq->queuelist, &bfqd->dispatch);
5425 } else { /* bfqq is assumed to be non null here */ 5470 } else {
5426 idle_timer_disabled = __bfq_insert_request(bfqd, rq); 5471 idle_timer_disabled = __bfq_insert_request(bfqd, rq);
5427 /* 5472 /*
5428 * Update bfqq, because, if a queue merge has occurred 5473 * Update bfqq, because, if a queue merge has occurred
diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
index 24ed26957367..55a7dc227dfb 100644
--- a/block/blk-cgroup.c
+++ b/block/blk-cgroup.c
@@ -54,7 +54,7 @@ static struct blkcg_policy *blkcg_policy[BLKCG_MAX_POLS];
54 54
55static LIST_HEAD(all_blkcgs); /* protected by blkcg_pol_mutex */ 55static LIST_HEAD(all_blkcgs); /* protected by blkcg_pol_mutex */
56 56
57static bool blkcg_debug_stats = false; 57bool blkcg_debug_stats = false;
58static struct workqueue_struct *blkcg_punt_bio_wq; 58static struct workqueue_struct *blkcg_punt_bio_wq;
59 59
60static bool blkcg_policy_enabled(struct request_queue *q, 60static bool blkcg_policy_enabled(struct request_queue *q,
@@ -944,10 +944,7 @@ static int blkcg_print_stat(struct seq_file *sf, void *v)
944 dbytes, dios); 944 dbytes, dios);
945 } 945 }
946 946
947 if (!blkcg_debug_stats) 947 if (blkcg_debug_stats && atomic_read(&blkg->use_delay)) {
948 goto next;
949
950 if (atomic_read(&blkg->use_delay)) {
951 has_stats = true; 948 has_stats = true;
952 off += scnprintf(buf+off, size-off, 949 off += scnprintf(buf+off, size-off,
953 " use_delay=%d delay_nsec=%llu", 950 " use_delay=%d delay_nsec=%llu",
@@ -967,7 +964,7 @@ static int blkcg_print_stat(struct seq_file *sf, void *v)
967 has_stats = true; 964 has_stats = true;
968 off += written; 965 off += written;
969 } 966 }
970next: 967
971 if (has_stats) { 968 if (has_stats) {
972 if (off < size - 1) { 969 if (off < size - 1) {
973 off += scnprintf(buf+off, size-off, "\n"); 970 off += scnprintf(buf+off, size-off, "\n");
diff --git a/block/blk-iolatency.c b/block/blk-iolatency.c
index d973c38ee4fd..0fff7b56df0e 100644
--- a/block/blk-iolatency.c
+++ b/block/blk-iolatency.c
@@ -917,6 +917,9 @@ static size_t iolatency_pd_stat(struct blkg_policy_data *pd, char *buf,
917 unsigned long long avg_lat; 917 unsigned long long avg_lat;
918 unsigned long long cur_win; 918 unsigned long long cur_win;
919 919
920 if (!blkcg_debug_stats)
921 return 0;
922
920 if (iolat->ssd) 923 if (iolat->ssd)
921 return iolatency_ssd_stat(iolat, buf, size); 924 return iolatency_ssd_stat(iolat, buf, size);
922 925
diff --git a/block/blk-mq-sched.h b/block/blk-mq-sched.h
index cf22ab00fefb..126021fc3a11 100644
--- a/block/blk-mq-sched.h
+++ b/block/blk-mq-sched.h
@@ -61,15 +61,6 @@ static inline void blk_mq_sched_completed_request(struct request *rq, u64 now)
61 e->type->ops.completed_request(rq, now); 61 e->type->ops.completed_request(rq, now);
62} 62}
63 63
64static inline void blk_mq_sched_started_request(struct request *rq)
65{
66 struct request_queue *q = rq->q;
67 struct elevator_queue *e = q->elevator;
68
69 if (e && e->type->ops.started_request)
70 e->type->ops.started_request(rq);
71}
72
73static inline void blk_mq_sched_requeue_request(struct request *rq) 64static inline void blk_mq_sched_requeue_request(struct request *rq)
74{ 65{
75 struct request_queue *q = rq->q; 66 struct request_queue *q = rq->q;
diff --git a/block/blk-mq.c b/block/blk-mq.c
index b038ec680e84..0835f4d8d42e 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -669,8 +669,6 @@ void blk_mq_start_request(struct request *rq)
669{ 669{
670 struct request_queue *q = rq->q; 670 struct request_queue *q = rq->q;
671 671
672 blk_mq_sched_started_request(rq);
673
674 trace_block_rq_issue(q, rq); 672 trace_block_rq_issue(q, rq);
675 673
676 if (test_bit(QUEUE_FLAG_STATS, &q->queue_flags)) { 674 if (test_bit(QUEUE_FLAG_STATS, &q->queue_flags)) {
@@ -2664,8 +2662,6 @@ void blk_mq_release(struct request_queue *q)
2664 struct blk_mq_hw_ctx *hctx, *next; 2662 struct blk_mq_hw_ctx *hctx, *next;
2665 int i; 2663 int i;
2666 2664
2667 cancel_delayed_work_sync(&q->requeue_work);
2668
2669 queue_for_each_hw_ctx(q, hctx, i) 2665 queue_for_each_hw_ctx(q, hctx, i)
2670 WARN_ON_ONCE(hctx && list_empty(&hctx->hctx_list)); 2666 WARN_ON_ONCE(hctx && list_empty(&hctx->hctx_list));
2671 2667
diff --git a/block/blk-rq-qos.c b/block/blk-rq-qos.c
index 659ccb8b693f..3954c0dc1443 100644
--- a/block/blk-rq-qos.c
+++ b/block/blk-rq-qos.c
@@ -202,6 +202,7 @@ static int rq_qos_wake_function(struct wait_queue_entry *curr,
202 return -1; 202 return -1;
203 203
204 data->got_token = true; 204 data->got_token = true;
205 smp_wmb();
205 list_del_init(&curr->entry); 206 list_del_init(&curr->entry);
206 wake_up_process(data->task); 207 wake_up_process(data->task);
207 return 1; 208 return 1;
@@ -244,7 +245,9 @@ void rq_qos_wait(struct rq_wait *rqw, void *private_data,
244 return; 245 return;
245 246
246 prepare_to_wait_exclusive(&rqw->wait, &data.wq, TASK_UNINTERRUPTIBLE); 247 prepare_to_wait_exclusive(&rqw->wait, &data.wq, TASK_UNINTERRUPTIBLE);
248 has_sleeper = !wq_has_single_sleeper(&rqw->wait);
247 do { 249 do {
250 /* The memory barrier in set_task_state saves us here. */
248 if (data.got_token) 251 if (data.got_token)
249 break; 252 break;
250 if (!has_sleeper && acquire_inflight_cb(rqw, private_data)) { 253 if (!has_sleeper && acquire_inflight_cb(rqw, private_data)) {
@@ -255,12 +258,14 @@ void rq_qos_wait(struct rq_wait *rqw, void *private_data,
255 * which means we now have two. Put our local token 258 * which means we now have two. Put our local token
256 * and wake anyone else potentially waiting for one. 259 * and wake anyone else potentially waiting for one.
257 */ 260 */
261 smp_rmb();
258 if (data.got_token) 262 if (data.got_token)
259 cleanup_cb(rqw, private_data); 263 cleanup_cb(rqw, private_data);
260 break; 264 break;
261 } 265 }
262 io_schedule(); 266 io_schedule();
263 has_sleeper = false; 267 has_sleeper = true;
268 set_current_state(TASK_UNINTERRUPTIBLE);
264 } while (1); 269 } while (1);
265 finish_wait(&rqw->wait, &data.wq); 270 finish_wait(&rqw->wait, &data.wq);
266} 271}
diff --git a/block/blk-settings.c b/block/blk-settings.c
index 2ae348c101a0..2c1831207a8f 100644
--- a/block/blk-settings.c
+++ b/block/blk-settings.c
@@ -752,7 +752,8 @@ void blk_queue_virt_boundary(struct request_queue *q, unsigned long mask)
752 * page (which might not be idential to the Linux PAGE_SIZE). Because 752 * page (which might not be idential to the Linux PAGE_SIZE). Because
753 * of that they are not limited by our notion of "segment size". 753 * of that they are not limited by our notion of "segment size".
754 */ 754 */
755 q->limits.max_segment_size = UINT_MAX; 755 if (mask)
756 q->limits.max_segment_size = UINT_MAX;
756} 757}
757EXPORT_SYMBOL(blk_queue_virt_boundary); 758EXPORT_SYMBOL(blk_queue_virt_boundary);
758 759
diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
index 977c659dcd18..9bfa3ea4ed63 100644
--- a/block/blk-sysfs.c
+++ b/block/blk-sysfs.c
@@ -892,6 +892,9 @@ static void __blk_release_queue(struct work_struct *work)
892 892
893 blk_free_queue_stats(q->stats); 893 blk_free_queue_stats(q->stats);
894 894
895 if (queue_is_mq(q))
896 cancel_delayed_work_sync(&q->requeue_work);
897
895 blk_exit_queue(q); 898 blk_exit_queue(q);
896 899
897 blk_queue_free_zone_bitmaps(q); 900 blk_queue_free_zone_bitmaps(q);
diff --git a/block/genhd.c b/block/genhd.c
index 97887e59f3b2..54f1f0d381f4 100644
--- a/block/genhd.c
+++ b/block/genhd.c
@@ -1969,7 +1969,7 @@ static const struct attribute *disk_events_attrs[] = {
1969 * The default polling interval can be specified by the kernel 1969 * The default polling interval can be specified by the kernel
1970 * parameter block.events_dfl_poll_msecs which defaults to 0 1970 * parameter block.events_dfl_poll_msecs which defaults to 0
1971 * (disable). This can also be modified runtime by writing to 1971 * (disable). This can also be modified runtime by writing to
1972 * /sys/module/block/events_dfl_poll_msecs. 1972 * /sys/module/block/parameters/events_dfl_poll_msecs.
1973 */ 1973 */
1974static int disk_events_set_dfl_poll_msecs(const char *val, 1974static int disk_events_set_dfl_poll_msecs(const char *val,
1975 const struct kernel_param *kp) 1975 const struct kernel_param *kp)
diff --git a/drivers/acpi/arm64/iort.c b/drivers/acpi/arm64/iort.c
index d4551e33fa71..8569b79e8b58 100644
--- a/drivers/acpi/arm64/iort.c
+++ b/drivers/acpi/arm64/iort.c
@@ -611,8 +611,8 @@ static int iort_dev_find_its_id(struct device *dev, u32 req_id,
611 611
612 /* Move to ITS specific data */ 612 /* Move to ITS specific data */
613 its = (struct acpi_iort_its_group *)node->node_data; 613 its = (struct acpi_iort_its_group *)node->node_data;
614 if (idx > its->its_count) { 614 if (idx >= its->its_count) {
615 dev_err(dev, "requested ITS ID index [%d] is greater than available [%d]\n", 615 dev_err(dev, "requested ITS ID index [%d] overruns ITS entries [%d]\n",
616 idx, its->its_count); 616 idx, its->its_count);
617 return -ENXIO; 617 return -ENXIO;
618 } 618 }
diff --git a/drivers/acpi/device_pm.c b/drivers/acpi/device_pm.c
index 28cffaaf9d82..f616b16c1f0b 100644
--- a/drivers/acpi/device_pm.c
+++ b/drivers/acpi/device_pm.c
@@ -232,13 +232,15 @@ int acpi_device_set_power(struct acpi_device *device, int state)
232 if (device->power.flags.power_resources) 232 if (device->power.flags.power_resources)
233 result = acpi_power_transition(device, target_state); 233 result = acpi_power_transition(device, target_state);
234 } else { 234 } else {
235 int cur_state = device->power.state;
236
235 if (device->power.flags.power_resources) { 237 if (device->power.flags.power_resources) {
236 result = acpi_power_transition(device, ACPI_STATE_D0); 238 result = acpi_power_transition(device, ACPI_STATE_D0);
237 if (result) 239 if (result)
238 goto end; 240 goto end;
239 } 241 }
240 242
241 if (device->power.state == ACPI_STATE_D0) { 243 if (cur_state == ACPI_STATE_D0) {
242 int psc; 244 int psc;
243 245
244 /* Nothing to do here if _PSC is not present. */ 246 /* Nothing to do here if _PSC is not present. */
diff --git a/drivers/acpi/nfit/core.c b/drivers/acpi/nfit/core.c
index c02fa27dd3f3..1413324982f0 100644
--- a/drivers/acpi/nfit/core.c
+++ b/drivers/acpi/nfit/core.c
@@ -1282,7 +1282,7 @@ static ssize_t hw_error_scrub_store(struct device *dev,
1282 if (rc) 1282 if (rc)
1283 return rc; 1283 return rc;
1284 1284
1285 device_lock(dev); 1285 nfit_device_lock(dev);
1286 nd_desc = dev_get_drvdata(dev); 1286 nd_desc = dev_get_drvdata(dev);
1287 if (nd_desc) { 1287 if (nd_desc) {
1288 struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc); 1288 struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc);
@@ -1299,7 +1299,7 @@ static ssize_t hw_error_scrub_store(struct device *dev,
1299 break; 1299 break;
1300 } 1300 }
1301 } 1301 }
1302 device_unlock(dev); 1302 nfit_device_unlock(dev);
1303 if (rc) 1303 if (rc)
1304 return rc; 1304 return rc;
1305 return size; 1305 return size;
@@ -1319,7 +1319,7 @@ static ssize_t scrub_show(struct device *dev,
1319 ssize_t rc = -ENXIO; 1319 ssize_t rc = -ENXIO;
1320 bool busy; 1320 bool busy;
1321 1321
1322 device_lock(dev); 1322 nfit_device_lock(dev);
1323 nd_desc = dev_get_drvdata(dev); 1323 nd_desc = dev_get_drvdata(dev);
1324 if (!nd_desc) { 1324 if (!nd_desc) {
1325 device_unlock(dev); 1325 device_unlock(dev);
@@ -1339,7 +1339,7 @@ static ssize_t scrub_show(struct device *dev,
1339 } 1339 }
1340 1340
1341 mutex_unlock(&acpi_desc->init_mutex); 1341 mutex_unlock(&acpi_desc->init_mutex);
1342 device_unlock(dev); 1342 nfit_device_unlock(dev);
1343 return rc; 1343 return rc;
1344} 1344}
1345 1345
@@ -1356,14 +1356,14 @@ static ssize_t scrub_store(struct device *dev,
1356 if (val != 1) 1356 if (val != 1)
1357 return -EINVAL; 1357 return -EINVAL;
1358 1358
1359 device_lock(dev); 1359 nfit_device_lock(dev);
1360 nd_desc = dev_get_drvdata(dev); 1360 nd_desc = dev_get_drvdata(dev);
1361 if (nd_desc) { 1361 if (nd_desc) {
1362 struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc); 1362 struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc);
1363 1363
1364 rc = acpi_nfit_ars_rescan(acpi_desc, ARS_REQ_LONG); 1364 rc = acpi_nfit_ars_rescan(acpi_desc, ARS_REQ_LONG);
1365 } 1365 }
1366 device_unlock(dev); 1366 nfit_device_unlock(dev);
1367 if (rc) 1367 if (rc)
1368 return rc; 1368 return rc;
1369 return size; 1369 return size;
@@ -1749,9 +1749,9 @@ static void acpi_nvdimm_notify(acpi_handle handle, u32 event, void *data)
1749 struct acpi_device *adev = data; 1749 struct acpi_device *adev = data;
1750 struct device *dev = &adev->dev; 1750 struct device *dev = &adev->dev;
1751 1751
1752 device_lock(dev->parent); 1752 nfit_device_lock(dev->parent);
1753 __acpi_nvdimm_notify(dev, event); 1753 __acpi_nvdimm_notify(dev, event);
1754 device_unlock(dev->parent); 1754 nfit_device_unlock(dev->parent);
1755} 1755}
1756 1756
1757static bool acpi_nvdimm_has_method(struct acpi_device *adev, char *method) 1757static bool acpi_nvdimm_has_method(struct acpi_device *adev, char *method)
@@ -3457,8 +3457,8 @@ static int acpi_nfit_flush_probe(struct nvdimm_bus_descriptor *nd_desc)
3457 struct device *dev = acpi_desc->dev; 3457 struct device *dev = acpi_desc->dev;
3458 3458
3459 /* Bounce the device lock to flush acpi_nfit_add / acpi_nfit_notify */ 3459 /* Bounce the device lock to flush acpi_nfit_add / acpi_nfit_notify */
3460 device_lock(dev); 3460 nfit_device_lock(dev);
3461 device_unlock(dev); 3461 nfit_device_unlock(dev);
3462 3462
3463 /* Bounce the init_mutex to complete initial registration */ 3463 /* Bounce the init_mutex to complete initial registration */
3464 mutex_lock(&acpi_desc->init_mutex); 3464 mutex_lock(&acpi_desc->init_mutex);
@@ -3602,8 +3602,8 @@ void acpi_nfit_shutdown(void *data)
3602 * acpi_nfit_ars_rescan() submissions have had a chance to 3602 * acpi_nfit_ars_rescan() submissions have had a chance to
3603 * either submit or see ->cancel set. 3603 * either submit or see ->cancel set.
3604 */ 3604 */
3605 device_lock(bus_dev); 3605 nfit_device_lock(bus_dev);
3606 device_unlock(bus_dev); 3606 nfit_device_unlock(bus_dev);
3607 3607
3608 flush_workqueue(nfit_wq); 3608 flush_workqueue(nfit_wq);
3609} 3609}
@@ -3746,9 +3746,9 @@ EXPORT_SYMBOL_GPL(__acpi_nfit_notify);
3746 3746
3747static void acpi_nfit_notify(struct acpi_device *adev, u32 event) 3747static void acpi_nfit_notify(struct acpi_device *adev, u32 event)
3748{ 3748{
3749 device_lock(&adev->dev); 3749 nfit_device_lock(&adev->dev);
3750 __acpi_nfit_notify(&adev->dev, adev->handle, event); 3750 __acpi_nfit_notify(&adev->dev, adev->handle, event);
3751 device_unlock(&adev->dev); 3751 nfit_device_unlock(&adev->dev);
3752} 3752}
3753 3753
3754static const struct acpi_device_id acpi_nfit_ids[] = { 3754static const struct acpi_device_id acpi_nfit_ids[] = {
diff --git a/drivers/acpi/nfit/nfit.h b/drivers/acpi/nfit/nfit.h
index 6ee2b02af73e..24241941181c 100644
--- a/drivers/acpi/nfit/nfit.h
+++ b/drivers/acpi/nfit/nfit.h
@@ -312,6 +312,30 @@ static inline struct acpi_nfit_desc *to_acpi_desc(
312 return container_of(nd_desc, struct acpi_nfit_desc, nd_desc); 312 return container_of(nd_desc, struct acpi_nfit_desc, nd_desc);
313} 313}
314 314
315#ifdef CONFIG_PROVE_LOCKING
316static inline void nfit_device_lock(struct device *dev)
317{
318 device_lock(dev);
319 mutex_lock(&dev->lockdep_mutex);
320}
321
322static inline void nfit_device_unlock(struct device *dev)
323{
324 mutex_unlock(&dev->lockdep_mutex);
325 device_unlock(dev);
326}
327#else
328static inline void nfit_device_lock(struct device *dev)
329{
330 device_lock(dev);
331}
332
333static inline void nfit_device_unlock(struct device *dev)
334{
335 device_unlock(dev);
336}
337#endif
338
315const guid_t *to_nfit_uuid(enum nfit_uuids id); 339const guid_t *to_nfit_uuid(enum nfit_uuids id);
316int acpi_nfit_init(struct acpi_nfit_desc *acpi_desc, void *nfit, acpi_size sz); 340int acpi_nfit_init(struct acpi_nfit_desc *acpi_desc, void *nfit, acpi_size sz);
317void acpi_nfit_shutdown(void *data); 341void acpi_nfit_shutdown(void *data);
diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c
index 0e28270b0fd8..aad6be5c0af0 100644
--- a/drivers/acpi/scan.c
+++ b/drivers/acpi/scan.c
@@ -2204,6 +2204,12 @@ int __init acpi_scan_init(void)
2204 acpi_gpe_apply_masked_gpes(); 2204 acpi_gpe_apply_masked_gpes();
2205 acpi_update_all_gpes(); 2205 acpi_update_all_gpes();
2206 2206
2207 /*
2208 * Although we call __add_memory() that is documented to require the
2209 * device_hotplug_lock, it is not necessary here because this is an
2210 * early code when userspace or any other code path cannot trigger
2211 * hotplug/hotunplug operations.
2212 */
2207 mutex_lock(&acpi_scan_lock); 2213 mutex_lock(&acpi_scan_lock);
2208 /* 2214 /*
2209 * Enumerate devices in the ACPI namespace. 2215 * Enumerate devices in the ACPI namespace.
diff --git a/drivers/android/binder.c b/drivers/android/binder.c
index 38a59a630cd4..dc1c83eafc22 100644
--- a/drivers/android/binder.c
+++ b/drivers/android/binder.c
@@ -2988,7 +2988,7 @@ static void binder_transaction(struct binder_proc *proc,
2988 else 2988 else
2989 return_error = BR_DEAD_REPLY; 2989 return_error = BR_DEAD_REPLY;
2990 mutex_unlock(&context->context_mgr_node_lock); 2990 mutex_unlock(&context->context_mgr_node_lock);
2991 if (target_node && target_proc == proc) { 2991 if (target_node && target_proc->pid == proc->pid) {
2992 binder_user_error("%d:%d got transaction to context manager from process owning it\n", 2992 binder_user_error("%d:%d got transaction to context manager from process owning it\n",
2993 proc->pid, thread->pid); 2993 proc->pid, thread->pid);
2994 return_error = BR_FAILED_REPLY; 2994 return_error = BR_FAILED_REPLY;
@@ -3239,7 +3239,8 @@ static void binder_transaction(struct binder_proc *proc,
3239 buffer_offset = off_start_offset; 3239 buffer_offset = off_start_offset;
3240 off_end_offset = off_start_offset + tr->offsets_size; 3240 off_end_offset = off_start_offset + tr->offsets_size;
3241 sg_buf_offset = ALIGN(off_end_offset, sizeof(void *)); 3241 sg_buf_offset = ALIGN(off_end_offset, sizeof(void *));
3242 sg_buf_end_offset = sg_buf_offset + extra_buffers_size; 3242 sg_buf_end_offset = sg_buf_offset + extra_buffers_size -
3243 ALIGN(secctx_sz, sizeof(u64));
3243 off_min = 0; 3244 off_min = 0;
3244 for (buffer_offset = off_start_offset; buffer_offset < off_end_offset; 3245 for (buffer_offset = off_start_offset; buffer_offset < off_end_offset;
3245 buffer_offset += sizeof(binder_size_t)) { 3246 buffer_offset += sizeof(binder_size_t)) {
diff --git a/drivers/ata/libahci_platform.c b/drivers/ata/libahci_platform.c
index 72312ad2e142..9e9583a6bba9 100644
--- a/drivers/ata/libahci_platform.c
+++ b/drivers/ata/libahci_platform.c
@@ -338,6 +338,9 @@ static int ahci_platform_get_phy(struct ahci_host_priv *hpriv, u32 port,
338 hpriv->phys[port] = NULL; 338 hpriv->phys[port] = NULL;
339 rc = 0; 339 rc = 0;
340 break; 340 break;
341 case -EPROBE_DEFER:
342 /* Do not complain yet */
343 break;
341 344
342 default: 345 default:
343 dev_err(dev, 346 dev_err(dev,
@@ -408,7 +411,6 @@ struct ahci_host_priv *ahci_platform_get_resources(struct platform_device *pdev,
408 hpriv->mmio = devm_ioremap_resource(dev, 411 hpriv->mmio = devm_ioremap_resource(dev,
409 platform_get_resource(pdev, IORESOURCE_MEM, 0)); 412 platform_get_resource(pdev, IORESOURCE_MEM, 0));
410 if (IS_ERR(hpriv->mmio)) { 413 if (IS_ERR(hpriv->mmio)) {
411 dev_err(dev, "no mmio space\n");
412 rc = PTR_ERR(hpriv->mmio); 414 rc = PTR_ERR(hpriv->mmio);
413 goto err_out; 415 goto err_out;
414 } 416 }
diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
index 391ac0503dc0..76d0f9de767b 100644
--- a/drivers/ata/libata-scsi.c
+++ b/drivers/ata/libata-scsi.c
@@ -1786,6 +1786,21 @@ nothing_to_do:
1786 return 1; 1786 return 1;
1787} 1787}
1788 1788
1789static bool ata_check_nblocks(struct scsi_cmnd *scmd, u32 n_blocks)
1790{
1791 struct request *rq = scmd->request;
1792 u32 req_blocks;
1793
1794 if (!blk_rq_is_passthrough(rq))
1795 return true;
1796
1797 req_blocks = blk_rq_bytes(rq) / scmd->device->sector_size;
1798 if (n_blocks > req_blocks)
1799 return false;
1800
1801 return true;
1802}
1803
1789/** 1804/**
1790 * ata_scsi_rw_xlat - Translate SCSI r/w command into an ATA one 1805 * ata_scsi_rw_xlat - Translate SCSI r/w command into an ATA one
1791 * @qc: Storage for translated ATA taskfile 1806 * @qc: Storage for translated ATA taskfile
@@ -1830,6 +1845,8 @@ static unsigned int ata_scsi_rw_xlat(struct ata_queued_cmd *qc)
1830 scsi_10_lba_len(cdb, &block, &n_block); 1845 scsi_10_lba_len(cdb, &block, &n_block);
1831 if (cdb[1] & (1 << 3)) 1846 if (cdb[1] & (1 << 3))
1832 tf_flags |= ATA_TFLAG_FUA; 1847 tf_flags |= ATA_TFLAG_FUA;
1848 if (!ata_check_nblocks(scmd, n_block))
1849 goto invalid_fld;
1833 break; 1850 break;
1834 case READ_6: 1851 case READ_6:
1835 case WRITE_6: 1852 case WRITE_6:
@@ -1844,6 +1861,8 @@ static unsigned int ata_scsi_rw_xlat(struct ata_queued_cmd *qc)
1844 */ 1861 */
1845 if (!n_block) 1862 if (!n_block)
1846 n_block = 256; 1863 n_block = 256;
1864 if (!ata_check_nblocks(scmd, n_block))
1865 goto invalid_fld;
1847 break; 1866 break;
1848 case READ_16: 1867 case READ_16:
1849 case WRITE_16: 1868 case WRITE_16:
@@ -1854,6 +1873,8 @@ static unsigned int ata_scsi_rw_xlat(struct ata_queued_cmd *qc)
1854 scsi_16_lba_len(cdb, &block, &n_block); 1873 scsi_16_lba_len(cdb, &block, &n_block);
1855 if (cdb[1] & (1 << 3)) 1874 if (cdb[1] & (1 << 3))
1856 tf_flags |= ATA_TFLAG_FUA; 1875 tf_flags |= ATA_TFLAG_FUA;
1876 if (!ata_check_nblocks(scmd, n_block))
1877 goto invalid_fld;
1857 break; 1878 break;
1858 default: 1879 default:
1859 DPRINTK("no-byte command\n"); 1880 DPRINTK("no-byte command\n");
diff --git a/drivers/ata/libata-sff.c b/drivers/ata/libata-sff.c
index 10aa27882142..4f115adb4ee8 100644
--- a/drivers/ata/libata-sff.c
+++ b/drivers/ata/libata-sff.c
@@ -658,6 +658,10 @@ static void ata_pio_sector(struct ata_queued_cmd *qc)
658 unsigned int offset; 658 unsigned int offset;
659 unsigned char *buf; 659 unsigned char *buf;
660 660
661 if (!qc->cursg) {
662 qc->curbytes = qc->nbytes;
663 return;
664 }
661 if (qc->curbytes == qc->nbytes - qc->sect_size) 665 if (qc->curbytes == qc->nbytes - qc->sect_size)
662 ap->hsm_task_state = HSM_ST_LAST; 666 ap->hsm_task_state = HSM_ST_LAST;
663 667
@@ -683,6 +687,8 @@ static void ata_pio_sector(struct ata_queued_cmd *qc)
683 687
684 if (qc->cursg_ofs == qc->cursg->length) { 688 if (qc->cursg_ofs == qc->cursg->length) {
685 qc->cursg = sg_next(qc->cursg); 689 qc->cursg = sg_next(qc->cursg);
690 if (!qc->cursg)
691 ap->hsm_task_state = HSM_ST_LAST;
686 qc->cursg_ofs = 0; 692 qc->cursg_ofs = 0;
687 } 693 }
688} 694}
diff --git a/drivers/ata/libata-zpodd.c b/drivers/ata/libata-zpodd.c
index 173e6f2dd9af..eefda51f97d3 100644
--- a/drivers/ata/libata-zpodd.c
+++ b/drivers/ata/libata-zpodd.c
@@ -56,7 +56,7 @@ static enum odd_mech_type zpodd_get_mech_type(struct ata_device *dev)
56 unsigned int ret; 56 unsigned int ret;
57 struct rm_feature_desc *desc; 57 struct rm_feature_desc *desc;
58 struct ata_taskfile tf; 58 struct ata_taskfile tf;
59 static const char cdb[] = { GPCMD_GET_CONFIGURATION, 59 static const char cdb[ATAPI_CDB_LEN] = { GPCMD_GET_CONFIGURATION,
60 2, /* only 1 feature descriptor requested */ 60 2, /* only 1 feature descriptor requested */
61 0, 3, /* 3, removable medium feature */ 61 0, 3, /* 3, removable medium feature */
62 0, 0, 0,/* reserved */ 62 0, 0, 0,/* reserved */
diff --git a/drivers/ata/pata_rb532_cf.c b/drivers/ata/pata_rb532_cf.c
index 7c37f2ff09e4..deae466395de 100644
--- a/drivers/ata/pata_rb532_cf.c
+++ b/drivers/ata/pata_rb532_cf.c
@@ -158,7 +158,6 @@ static int rb532_pata_driver_probe(struct platform_device *pdev)
158static int rb532_pata_driver_remove(struct platform_device *pdev) 158static int rb532_pata_driver_remove(struct platform_device *pdev)
159{ 159{
160 struct ata_host *ah = platform_get_drvdata(pdev); 160 struct ata_host *ah = platform_get_drvdata(pdev);
161 struct rb532_cf_info *info = ah->private_data;
162 161
163 ata_host_detach(ah); 162 ata_host_detach(ah);
164 163
diff --git a/drivers/atm/Kconfig b/drivers/atm/Kconfig
index 2e2efa577437..8c37294f1d1e 100644
--- a/drivers/atm/Kconfig
+++ b/drivers/atm/Kconfig
@@ -200,7 +200,7 @@ config ATM_NICSTAR_USE_SUNI
200 make the card work). 200 make the card work).
201 201
202config ATM_NICSTAR_USE_IDT77105 202config ATM_NICSTAR_USE_IDT77105
203 bool "Use IDT77015 PHY driver (25Mbps)" 203 bool "Use IDT77105 PHY driver (25Mbps)"
204 depends on ATM_NICSTAR 204 depends on ATM_NICSTAR
205 help 205 help
206 Support for the PHYsical layer chip in ForeRunner LE25 cards. In 206 Support for the PHYsical layer chip in ForeRunner LE25 cards. In
diff --git a/drivers/atm/iphase.c b/drivers/atm/iphase.c
index 302cf0ba1600..8c7a996d1f16 100644
--- a/drivers/atm/iphase.c
+++ b/drivers/atm/iphase.c
@@ -63,6 +63,7 @@
63#include <asm/byteorder.h> 63#include <asm/byteorder.h>
64#include <linux/vmalloc.h> 64#include <linux/vmalloc.h>
65#include <linux/jiffies.h> 65#include <linux/jiffies.h>
66#include <linux/nospec.h>
66#include "iphase.h" 67#include "iphase.h"
67#include "suni.h" 68#include "suni.h"
68#define swap_byte_order(x) (((x & 0xff) << 8) | ((x & 0xff00) >> 8)) 69#define swap_byte_order(x) (((x & 0xff) << 8) | ((x & 0xff00) >> 8))
@@ -2760,8 +2761,11 @@ static int ia_ioctl(struct atm_dev *dev, unsigned int cmd, void __user *arg)
2760 } 2761 }
2761 if (copy_from_user(&ia_cmds, arg, sizeof ia_cmds)) return -EFAULT; 2762 if (copy_from_user(&ia_cmds, arg, sizeof ia_cmds)) return -EFAULT;
2762 board = ia_cmds.status; 2763 board = ia_cmds.status;
2763 if ((board < 0) || (board > iadev_count)) 2764
2764 board = 0; 2765 if ((board < 0) || (board > iadev_count))
2766 board = 0;
2767 board = array_index_nospec(board, iadev_count + 1);
2768
2765 iadev = ia_dev[board]; 2769 iadev = ia_dev[board];
2766 switch (ia_cmds.cmd) { 2770 switch (ia_cmds.cmd) {
2767 case MEMDUMP: 2771 case MEMDUMP:
diff --git a/drivers/auxdisplay/Kconfig b/drivers/auxdisplay/Kconfig
index dd61fdd400f0..68489d1f00bb 100644
--- a/drivers/auxdisplay/Kconfig
+++ b/drivers/auxdisplay/Kconfig
@@ -448,6 +448,11 @@ config PANEL_BOOT_MESSAGE
448choice 448choice
449 prompt "Backlight initial state" 449 prompt "Backlight initial state"
450 default CHARLCD_BL_FLASH 450 default CHARLCD_BL_FLASH
451 ---help---
452 Select the initial backlight state on boot or module load.
453
454 Previously, there was no option for this: the backlight flashed
455 briefly on init. Now you can also turn it off/on.
451 456
452 config CHARLCD_BL_OFF 457 config CHARLCD_BL_OFF
453 bool "Off" 458 bool "Off"
diff --git a/drivers/auxdisplay/charlcd.c b/drivers/auxdisplay/charlcd.c
index 92745efefb54..bef6b85778b6 100644
--- a/drivers/auxdisplay/charlcd.c
+++ b/drivers/auxdisplay/charlcd.c
@@ -20,7 +20,7 @@
20 20
21#include <generated/utsrelease.h> 21#include <generated/utsrelease.h>
22 22
23#include <misc/charlcd.h> 23#include "charlcd.h"
24 24
25#define LCD_MINOR 156 25#define LCD_MINOR 156
26 26
diff --git a/include/misc/charlcd.h b/drivers/auxdisplay/charlcd.h
index 8cf6c18b0adb..00911ad0f3de 100644
--- a/include/misc/charlcd.h
+++ b/drivers/auxdisplay/charlcd.h
@@ -6,6 +6,9 @@
6 * Copyright (C) 2016-2017 Glider bvba 6 * Copyright (C) 2016-2017 Glider bvba
7 */ 7 */
8 8
9#ifndef _CHARLCD_H
10#define _CHARLCD_H
11
9struct charlcd { 12struct charlcd {
10 const struct charlcd_ops *ops; 13 const struct charlcd_ops *ops;
11 const unsigned char *char_conv; /* Optional */ 14 const unsigned char *char_conv; /* Optional */
@@ -37,3 +40,5 @@ int charlcd_register(struct charlcd *lcd);
37int charlcd_unregister(struct charlcd *lcd); 40int charlcd_unregister(struct charlcd *lcd);
38 41
39void charlcd_poke(struct charlcd *lcd); 42void charlcd_poke(struct charlcd *lcd);
43
44#endif /* CHARLCD_H */
diff --git a/drivers/auxdisplay/hd44780.c b/drivers/auxdisplay/hd44780.c
index ab15b64707ad..bcbe13092327 100644
--- a/drivers/auxdisplay/hd44780.c
+++ b/drivers/auxdisplay/hd44780.c
@@ -14,8 +14,7 @@
14#include <linux/property.h> 14#include <linux/property.h>
15#include <linux/slab.h> 15#include <linux/slab.h>
16 16
17#include <misc/charlcd.h> 17#include "charlcd.h"
18
19 18
20enum hd44780_pin { 19enum hd44780_pin {
21 /* Order does matter due to writing to GPIO array subsets! */ 20 /* Order does matter due to writing to GPIO array subsets! */
diff --git a/drivers/auxdisplay/ht16k33.c b/drivers/auxdisplay/ht16k33.c
index 9c0bb771751d..a2fcde582e2a 100644
--- a/drivers/auxdisplay/ht16k33.c
+++ b/drivers/auxdisplay/ht16k33.c
@@ -74,7 +74,7 @@ struct ht16k33_priv {
74 struct ht16k33_fbdev fbdev; 74 struct ht16k33_fbdev fbdev;
75}; 75};
76 76
77static struct fb_fix_screeninfo ht16k33_fb_fix = { 77static const struct fb_fix_screeninfo ht16k33_fb_fix = {
78 .id = DRIVER_NAME, 78 .id = DRIVER_NAME,
79 .type = FB_TYPE_PACKED_PIXELS, 79 .type = FB_TYPE_PACKED_PIXELS,
80 .visual = FB_VISUAL_MONO10, 80 .visual = FB_VISUAL_MONO10,
@@ -85,7 +85,7 @@ static struct fb_fix_screeninfo ht16k33_fb_fix = {
85 .accel = FB_ACCEL_NONE, 85 .accel = FB_ACCEL_NONE,
86}; 86};
87 87
88static struct fb_var_screeninfo ht16k33_fb_var = { 88static const struct fb_var_screeninfo ht16k33_fb_var = {
89 .xres = HT16K33_MATRIX_LED_MAX_ROWS, 89 .xres = HT16K33_MATRIX_LED_MAX_ROWS,
90 .yres = HT16K33_MATRIX_LED_MAX_COLS, 90 .yres = HT16K33_MATRIX_LED_MAX_COLS,
91 .xres_virtual = HT16K33_MATRIX_LED_MAX_ROWS, 91 .xres_virtual = HT16K33_MATRIX_LED_MAX_ROWS,
diff --git a/drivers/auxdisplay/panel.c b/drivers/auxdisplay/panel.c
index e06de63497cf..85965953683e 100644
--- a/drivers/auxdisplay/panel.c
+++ b/drivers/auxdisplay/panel.c
@@ -55,7 +55,7 @@
55#include <linux/io.h> 55#include <linux/io.h>
56#include <linux/uaccess.h> 56#include <linux/uaccess.h>
57 57
58#include <misc/charlcd.h> 58#include "charlcd.h"
59 59
60#define KEYPAD_MINOR 185 60#define KEYPAD_MINOR 185
61 61
@@ -1617,6 +1617,8 @@ static void panel_attach(struct parport *port)
1617 return; 1617 return;
1618 1618
1619err_lcd_unreg: 1619err_lcd_unreg:
1620 if (scan_timer.function)
1621 del_timer_sync(&scan_timer);
1620 if (lcd.enabled) 1622 if (lcd.enabled)
1621 charlcd_unregister(lcd.charlcd); 1623 charlcd_unregister(lcd.charlcd);
1622err_unreg_device: 1624err_unreg_device:
diff --git a/drivers/base/core.c b/drivers/base/core.c
index da84a73f2ba6..1669d41fcddc 100644
--- a/drivers/base/core.c
+++ b/drivers/base/core.c
@@ -1663,6 +1663,9 @@ void device_initialize(struct device *dev)
1663 kobject_init(&dev->kobj, &device_ktype); 1663 kobject_init(&dev->kobj, &device_ktype);
1664 INIT_LIST_HEAD(&dev->dma_pools); 1664 INIT_LIST_HEAD(&dev->dma_pools);
1665 mutex_init(&dev->mutex); 1665 mutex_init(&dev->mutex);
1666#ifdef CONFIG_PROVE_LOCKING
1667 mutex_init(&dev->lockdep_mutex);
1668#endif
1666 lockdep_set_novalidate_class(&dev->mutex); 1669 lockdep_set_novalidate_class(&dev->mutex);
1667 spin_lock_init(&dev->devres_lock); 1670 spin_lock_init(&dev->devres_lock);
1668 INIT_LIST_HEAD(&dev->devres_head); 1671 INIT_LIST_HEAD(&dev->devres_head);
@@ -1820,12 +1823,63 @@ static inline struct kobject *get_glue_dir(struct device *dev)
1820 */ 1823 */
1821static void cleanup_glue_dir(struct device *dev, struct kobject *glue_dir) 1824static void cleanup_glue_dir(struct device *dev, struct kobject *glue_dir)
1822{ 1825{
1826 unsigned int ref;
1827
1823 /* see if we live in a "glue" directory */ 1828 /* see if we live in a "glue" directory */
1824 if (!live_in_glue_dir(glue_dir, dev)) 1829 if (!live_in_glue_dir(glue_dir, dev))
1825 return; 1830 return;
1826 1831
1827 mutex_lock(&gdp_mutex); 1832 mutex_lock(&gdp_mutex);
1828 if (!kobject_has_children(glue_dir)) 1833 /**
1834 * There is a race condition between removing glue directory
1835 * and adding a new device under the glue directory.
1836 *
1837 * CPU1: CPU2:
1838 *
1839 * device_add()
1840 * get_device_parent()
1841 * class_dir_create_and_add()
1842 * kobject_add_internal()
1843 * create_dir() // create glue_dir
1844 *
1845 * device_add()
1846 * get_device_parent()
1847 * kobject_get() // get glue_dir
1848 *
1849 * device_del()
1850 * cleanup_glue_dir()
1851 * kobject_del(glue_dir)
1852 *
1853 * kobject_add()
1854 * kobject_add_internal()
1855 * create_dir() // in glue_dir
1856 * sysfs_create_dir_ns()
1857 * kernfs_create_dir_ns(sd)
1858 *
1859 * sysfs_remove_dir() // glue_dir->sd=NULL
1860 * sysfs_put() // free glue_dir->sd
1861 *
1862 * // sd is freed
1863 * kernfs_new_node(sd)
1864 * kernfs_get(glue_dir)
1865 * kernfs_add_one()
1866 * kernfs_put()
1867 *
1868 * Before CPU1 remove last child device under glue dir, if CPU2 add
1869 * a new device under glue dir, the glue_dir kobject reference count
1870 * will be increase to 2 in kobject_get(k). And CPU2 has been called
1871 * kernfs_create_dir_ns(). Meanwhile, CPU1 call sysfs_remove_dir()
1872 * and sysfs_put(). This result in glue_dir->sd is freed.
1873 *
1874 * Then the CPU2 will see a stale "empty" but still potentially used
1875 * glue dir around in kernfs_new_node().
1876 *
1877 * In order to avoid this happening, we also should make sure that
1878 * kernfs_node for glue_dir is released in CPU1 only when refcount
1879 * for glue_dir kobj is 1.
1880 */
1881 ref = kref_read(&glue_dir->kref);
1882 if (!kobject_has_children(glue_dir) && !--ref)
1829 kobject_del(glue_dir); 1883 kobject_del(glue_dir);
1830 kobject_put(glue_dir); 1884 kobject_put(glue_dir);
1831 mutex_unlock(&gdp_mutex); 1885 mutex_unlock(&gdp_mutex);
@@ -2211,6 +2265,24 @@ void put_device(struct device *dev)
2211} 2265}
2212EXPORT_SYMBOL_GPL(put_device); 2266EXPORT_SYMBOL_GPL(put_device);
2213 2267
2268bool kill_device(struct device *dev)
2269{
2270 /*
2271 * Require the device lock and set the "dead" flag to guarantee that
2272 * the update behavior is consistent with the other bitfields near
2273 * it and that we cannot have an asynchronous probe routine trying
2274 * to run while we are tearing out the bus/class/sysfs from
2275 * underneath the device.
2276 */
2277 lockdep_assert_held(&dev->mutex);
2278
2279 if (dev->p->dead)
2280 return false;
2281 dev->p->dead = true;
2282 return true;
2283}
2284EXPORT_SYMBOL_GPL(kill_device);
2285
2214/** 2286/**
2215 * device_del - delete device from system. 2287 * device_del - delete device from system.
2216 * @dev: device. 2288 * @dev: device.
@@ -2230,15 +2302,8 @@ void device_del(struct device *dev)
2230 struct kobject *glue_dir = NULL; 2302 struct kobject *glue_dir = NULL;
2231 struct class_interface *class_intf; 2303 struct class_interface *class_intf;
2232 2304
2233 /*
2234 * Hold the device lock and set the "dead" flag to guarantee that
2235 * the update behavior is consistent with the other bitfields near
2236 * it and that we cannot have an asynchronous probe routine trying
2237 * to run while we are tearing out the bus/class/sysfs from
2238 * underneath the device.
2239 */
2240 device_lock(dev); 2305 device_lock(dev);
2241 dev->p->dead = true; 2306 kill_device(dev);
2242 device_unlock(dev); 2307 device_unlock(dev);
2243 2308
2244 /* Notify clients of device removal. This call must come 2309 /* Notify clients of device removal. This call must come
diff --git a/drivers/base/firmware_loader/firmware.h b/drivers/base/firmware_loader/firmware.h
index 7048a41973ed..7ecd590e67fe 100644
--- a/drivers/base/firmware_loader/firmware.h
+++ b/drivers/base/firmware_loader/firmware.h
@@ -141,8 +141,8 @@ int fw_grow_paged_buf(struct fw_priv *fw_priv, int pages_needed);
141int fw_map_paged_buf(struct fw_priv *fw_priv); 141int fw_map_paged_buf(struct fw_priv *fw_priv);
142#else 142#else
143static inline void fw_free_paged_buf(struct fw_priv *fw_priv) {} 143static inline void fw_free_paged_buf(struct fw_priv *fw_priv) {}
144int fw_grow_paged_buf(struct fw_priv *fw_priv, int pages_needed) { return -ENXIO; } 144static inline int fw_grow_paged_buf(struct fw_priv *fw_priv, int pages_needed) { return -ENXIO; }
145int fw_map_paged_buf(struct fw_priv *fw_priv) { return -ENXIO; } 145static inline int fw_map_paged_buf(struct fw_priv *fw_priv) { return -ENXIO; }
146#endif 146#endif
147 147
148#endif /* __FIRMWARE_LOADER_H */ 148#endif /* __FIRMWARE_LOADER_H */
diff --git a/drivers/base/platform.c b/drivers/base/platform.c
index 506a0175a5a7..ec974ba9c0c4 100644
--- a/drivers/base/platform.c
+++ b/drivers/base/platform.c
@@ -157,8 +157,13 @@ int platform_get_irq(struct platform_device *dev, unsigned int num)
157 * the device will only expose one IRQ, and this fallback 157 * the device will only expose one IRQ, and this fallback
158 * allows a common code path across either kind of resource. 158 * allows a common code path across either kind of resource.
159 */ 159 */
160 if (num == 0 && has_acpi_companion(&dev->dev)) 160 if (num == 0 && has_acpi_companion(&dev->dev)) {
161 return acpi_dev_gpio_irq_get(ACPI_COMPANION(&dev->dev), num); 161 int ret = acpi_dev_gpio_irq_get(ACPI_COMPANION(&dev->dev), num);
162
163 /* Our callers expect -ENXIO for missing IRQs. */
164 if (ret >= 0 || ret == -EPROBE_DEFER)
165 return ret;
166 }
162 167
163 return -ENXIO; 168 return -ENXIO;
164#endif 169#endif
diff --git a/drivers/base/regmap/Kconfig b/drivers/base/regmap/Kconfig
index a4984136c19d..0fd6f97ee523 100644
--- a/drivers/base/regmap/Kconfig
+++ b/drivers/base/regmap/Kconfig
@@ -44,7 +44,7 @@ config REGMAP_IRQ
44 44
45config REGMAP_SOUNDWIRE 45config REGMAP_SOUNDWIRE
46 tristate 46 tristate
47 depends on SOUNDWIRE_BUS 47 depends on SOUNDWIRE
48 48
49config REGMAP_SCCB 49config REGMAP_SCCB
50 tristate 50 tristate
diff --git a/drivers/block/aoe/aoedev.c b/drivers/block/aoe/aoedev.c
index 5b49f1b33ebe..e2ea2356da06 100644
--- a/drivers/block/aoe/aoedev.c
+++ b/drivers/block/aoe/aoedev.c
@@ -323,10 +323,14 @@ flush(const char __user *str, size_t cnt, int exiting)
323 } 323 }
324 324
325 flush_scheduled_work(); 325 flush_scheduled_work();
326 /* pass one: without sleeping, do aoedev_downdev */ 326 /* pass one: do aoedev_downdev, which might sleep */
327restart1:
327 spin_lock_irqsave(&devlist_lock, flags); 328 spin_lock_irqsave(&devlist_lock, flags);
328 for (d = devlist; d; d = d->next) { 329 for (d = devlist; d; d = d->next) {
329 spin_lock(&d->lock); 330 spin_lock(&d->lock);
331 if (d->flags & DEVFL_TKILL)
332 goto cont;
333
330 if (exiting) { 334 if (exiting) {
331 /* unconditionally take each device down */ 335 /* unconditionally take each device down */
332 } else if (specified) { 336 } else if (specified) {
@@ -338,8 +342,11 @@ flush(const char __user *str, size_t cnt, int exiting)
338 || d->ref) 342 || d->ref)
339 goto cont; 343 goto cont;
340 344
345 spin_unlock(&d->lock);
346 spin_unlock_irqrestore(&devlist_lock, flags);
341 aoedev_downdev(d); 347 aoedev_downdev(d);
342 d->flags |= DEVFL_TKILL; 348 d->flags |= DEVFL_TKILL;
349 goto restart1;
343cont: 350cont:
344 spin_unlock(&d->lock); 351 spin_unlock(&d->lock);
345 } 352 }
@@ -348,7 +355,7 @@ cont:
348 /* pass two: call freedev, which might sleep, 355 /* pass two: call freedev, which might sleep,
349 * for aoedevs marked with DEVFL_TKILL 356 * for aoedevs marked with DEVFL_TKILL
350 */ 357 */
351restart: 358restart2:
352 spin_lock_irqsave(&devlist_lock, flags); 359 spin_lock_irqsave(&devlist_lock, flags);
353 for (d = devlist; d; d = d->next) { 360 for (d = devlist; d; d = d->next) {
354 spin_lock(&d->lock); 361 spin_lock(&d->lock);
@@ -357,7 +364,7 @@ restart:
357 spin_unlock(&d->lock); 364 spin_unlock(&d->lock);
358 spin_unlock_irqrestore(&devlist_lock, flags); 365 spin_unlock_irqrestore(&devlist_lock, flags);
359 freedev(d); 366 freedev(d);
360 goto restart; 367 goto restart2;
361 } 368 }
362 spin_unlock(&d->lock); 369 spin_unlock(&d->lock);
363 } 370 }
diff --git a/drivers/block/ataflop.c b/drivers/block/ataflop.c
index 85f20e371f2f..bd7d3bb8b890 100644
--- a/drivers/block/ataflop.c
+++ b/drivers/block/ataflop.c
@@ -1726,6 +1726,7 @@ static int fd_locked_ioctl(struct block_device *bdev, fmode_t mode,
1726 /* MSch: invalidate default_params */ 1726 /* MSch: invalidate default_params */
1727 default_params[drive].blocks = 0; 1727 default_params[drive].blocks = 0;
1728 set_capacity(floppy->disk, MAX_DISK_SIZE * 2); 1728 set_capacity(floppy->disk, MAX_DISK_SIZE * 2);
1729 /* Fall through */
1729 case FDFMTEND: 1730 case FDFMTEND:
1730 case FDFLUSH: 1731 case FDFLUSH:
1731 /* invalidate the buffer track to force a reread */ 1732 /* invalidate the buffer track to force a reread */
diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c
index 9bd4ddd12b25..5b248763a672 100644
--- a/drivers/block/drbd/drbd_main.c
+++ b/drivers/block/drbd/drbd_main.c
@@ -322,6 +322,8 @@ static int drbd_thread_setup(void *arg)
322 thi->name[0], 322 thi->name[0],
323 resource->name); 323 resource->name);
324 324
325 allow_kernel_signal(DRBD_SIGKILL);
326 allow_kernel_signal(SIGXCPU);
325restart: 327restart:
326 retval = thi->function(thi); 328 retval = thi->function(thi);
327 329
diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c
index 90ebfcae0ce6..2b3103c30857 100644
--- a/drivers/block/drbd/drbd_receiver.c
+++ b/drivers/block/drbd/drbd_receiver.c
@@ -5417,7 +5417,7 @@ static int drbd_do_auth(struct drbd_connection *connection)
5417 unsigned int key_len; 5417 unsigned int key_len;
5418 char secret[SHARED_SECRET_MAX]; /* 64 byte */ 5418 char secret[SHARED_SECRET_MAX]; /* 64 byte */
5419 unsigned int resp_size; 5419 unsigned int resp_size;
5420 SHASH_DESC_ON_STACK(desc, connection->cram_hmac_tfm); 5420 struct shash_desc *desc;
5421 struct packet_info pi; 5421 struct packet_info pi;
5422 struct net_conf *nc; 5422 struct net_conf *nc;
5423 int err, rv; 5423 int err, rv;
@@ -5430,6 +5430,13 @@ static int drbd_do_auth(struct drbd_connection *connection)
5430 memcpy(secret, nc->shared_secret, key_len); 5430 memcpy(secret, nc->shared_secret, key_len);
5431 rcu_read_unlock(); 5431 rcu_read_unlock();
5432 5432
5433 desc = kmalloc(sizeof(struct shash_desc) +
5434 crypto_shash_descsize(connection->cram_hmac_tfm),
5435 GFP_KERNEL);
5436 if (!desc) {
5437 rv = -1;
5438 goto fail;
5439 }
5433 desc->tfm = connection->cram_hmac_tfm; 5440 desc->tfm = connection->cram_hmac_tfm;
5434 5441
5435 rv = crypto_shash_setkey(connection->cram_hmac_tfm, (u8 *)secret, key_len); 5442 rv = crypto_shash_setkey(connection->cram_hmac_tfm, (u8 *)secret, key_len);
@@ -5571,7 +5578,10 @@ static int drbd_do_auth(struct drbd_connection *connection)
5571 kfree(peers_ch); 5578 kfree(peers_ch);
5572 kfree(response); 5579 kfree(response);
5573 kfree(right_response); 5580 kfree(right_response);
5574 shash_desc_zero(desc); 5581 if (desc) {
5582 shash_desc_zero(desc);
5583 kfree(desc);
5584 }
5575 5585
5576 return rv; 5586 return rv;
5577} 5587}
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index 44c9985f352a..ab7ca5989097 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -885,7 +885,7 @@ static void loop_unprepare_queue(struct loop_device *lo)
885 885
886static int loop_kthread_worker_fn(void *worker_ptr) 886static int loop_kthread_worker_fn(void *worker_ptr)
887{ 887{
888 current->flags |= PF_LESS_THROTTLE; 888 current->flags |= PF_LESS_THROTTLE | PF_MEMALLOC_NOIO;
889 return kthread_worker_fn(worker_ptr); 889 return kthread_worker_fn(worker_ptr);
890} 890}
891 891
@@ -924,6 +924,7 @@ static int loop_set_fd(struct loop_device *lo, fmode_t mode,
924 struct file *file; 924 struct file *file;
925 struct inode *inode; 925 struct inode *inode;
926 struct address_space *mapping; 926 struct address_space *mapping;
927 struct block_device *claimed_bdev = NULL;
927 int lo_flags = 0; 928 int lo_flags = 0;
928 int error; 929 int error;
929 loff_t size; 930 loff_t size;
@@ -942,10 +943,11 @@ static int loop_set_fd(struct loop_device *lo, fmode_t mode,
942 * here to avoid changing device under exclusive owner. 943 * here to avoid changing device under exclusive owner.
943 */ 944 */
944 if (!(mode & FMODE_EXCL)) { 945 if (!(mode & FMODE_EXCL)) {
945 bdgrab(bdev); 946 claimed_bdev = bd_start_claiming(bdev, loop_set_fd);
946 error = blkdev_get(bdev, mode | FMODE_EXCL, loop_set_fd); 947 if (IS_ERR(claimed_bdev)) {
947 if (error) 948 error = PTR_ERR(claimed_bdev);
948 goto out_putf; 949 goto out_putf;
950 }
949 } 951 }
950 952
951 error = mutex_lock_killable(&loop_ctl_mutex); 953 error = mutex_lock_killable(&loop_ctl_mutex);
@@ -1015,15 +1017,15 @@ static int loop_set_fd(struct loop_device *lo, fmode_t mode,
1015 mutex_unlock(&loop_ctl_mutex); 1017 mutex_unlock(&loop_ctl_mutex);
1016 if (partscan) 1018 if (partscan)
1017 loop_reread_partitions(lo, bdev); 1019 loop_reread_partitions(lo, bdev);
1018 if (!(mode & FMODE_EXCL)) 1020 if (claimed_bdev)
1019 blkdev_put(bdev, mode | FMODE_EXCL); 1021 bd_abort_claiming(bdev, claimed_bdev, loop_set_fd);
1020 return 0; 1022 return 0;
1021 1023
1022out_unlock: 1024out_unlock:
1023 mutex_unlock(&loop_ctl_mutex); 1025 mutex_unlock(&loop_ctl_mutex);
1024out_bdev: 1026out_bdev:
1025 if (!(mode & FMODE_EXCL)) 1027 if (claimed_bdev)
1026 blkdev_put(bdev, mode | FMODE_EXCL); 1028 bd_abort_claiming(bdev, claimed_bdev, loop_set_fd);
1027out_putf: 1029out_putf:
1028 fput(file); 1030 fput(file);
1029out: 1031out:
diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
index 9bcde2325893..e21d2ded732b 100644
--- a/drivers/block/nbd.c
+++ b/drivers/block/nbd.c
@@ -1231,7 +1231,7 @@ static void nbd_clear_sock_ioctl(struct nbd_device *nbd,
1231 struct block_device *bdev) 1231 struct block_device *bdev)
1232{ 1232{
1233 sock_shutdown(nbd); 1233 sock_shutdown(nbd);
1234 kill_bdev(bdev); 1234 __invalidate_device(bdev, true);
1235 nbd_bdev_reset(bdev); 1235 nbd_bdev_reset(bdev);
1236 if (test_and_clear_bit(NBD_HAS_CONFIG_REF, 1236 if (test_and_clear_bit(NBD_HAS_CONFIG_REF,
1237 &nbd->config->runtime_flags)) 1237 &nbd->config->runtime_flags))
diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
index 3327192bb71f..c8fb886aebd4 100644
--- a/drivers/block/rbd.c
+++ b/drivers/block/rbd.c
@@ -3038,6 +3038,17 @@ again:
3038 } 3038 }
3039 return true; 3039 return true;
3040 case RBD_OBJ_READ_PARENT: 3040 case RBD_OBJ_READ_PARENT:
3041 /*
3042 * The parent image is read only up to the overlap -- zero-fill
3043 * from the overlap to the end of the request.
3044 */
3045 if (!*result) {
3046 u32 obj_overlap = rbd_obj_img_extents_bytes(obj_req);
3047
3048 if (obj_overlap < obj_req->ex.oe_len)
3049 rbd_obj_zero_range(obj_req, obj_overlap,
3050 obj_req->ex.oe_len - obj_overlap);
3051 }
3041 return true; 3052 return true;
3042 default: 3053 default:
3043 BUG(); 3054 BUG();
diff --git a/drivers/block/xen-blkback/xenbus.c b/drivers/block/xen-blkback/xenbus.c
index 3ac6a5d18071..b90dbcd99c03 100644
--- a/drivers/block/xen-blkback/xenbus.c
+++ b/drivers/block/xen-blkback/xenbus.c
@@ -965,6 +965,7 @@ static int read_per_ring_refs(struct xen_blkif_ring *ring, const char *dir)
965 } 965 }
966 } 966 }
967 967
968 err = -ENOMEM;
968 for (i = 0; i < nr_grefs * XEN_BLKIF_REQS_PER_PAGE; i++) { 969 for (i = 0; i < nr_grefs * XEN_BLKIF_REQS_PER_PAGE; i++) {
969 req = kzalloc(sizeof(*req), GFP_KERNEL); 970 req = kzalloc(sizeof(*req), GFP_KERNEL);
970 if (!req) 971 if (!req)
@@ -987,7 +988,7 @@ static int read_per_ring_refs(struct xen_blkif_ring *ring, const char *dir)
987 err = xen_blkif_map(ring, ring_ref, nr_grefs, evtchn); 988 err = xen_blkif_map(ring, ring_ref, nr_grefs, evtchn);
988 if (err) { 989 if (err) {
989 xenbus_dev_fatal(dev, err, "mapping ring-ref port %u", evtchn); 990 xenbus_dev_fatal(dev, err, "mapping ring-ref port %u", evtchn);
990 return err; 991 goto fail;
991 } 992 }
992 993
993 return 0; 994 return 0;
@@ -1007,8 +1008,7 @@ fail:
1007 } 1008 }
1008 kfree(req); 1009 kfree(req);
1009 } 1010 }
1010 return -ENOMEM; 1011 return err;
1011
1012} 1012}
1013 1013
1014static int connect_ring(struct backend_info *be) 1014static int connect_ring(struct backend_info *be)
diff --git a/drivers/bluetooth/btqca.c b/drivers/bluetooth/btqca.c
index 8b33128dccee..0875470a7806 100644
--- a/drivers/bluetooth/btqca.c
+++ b/drivers/bluetooth/btqca.c
@@ -99,6 +99,27 @@ static int qca_send_reset(struct hci_dev *hdev)
99 return 0; 99 return 0;
100} 100}
101 101
102int qca_send_pre_shutdown_cmd(struct hci_dev *hdev)
103{
104 struct sk_buff *skb;
105 int err;
106
107 bt_dev_dbg(hdev, "QCA pre shutdown cmd");
108
109 skb = __hci_cmd_sync(hdev, QCA_PRE_SHUTDOWN_CMD, 0,
110 NULL, HCI_INIT_TIMEOUT);
111 if (IS_ERR(skb)) {
112 err = PTR_ERR(skb);
113 bt_dev_err(hdev, "QCA preshutdown_cmd failed (%d)", err);
114 return err;
115 }
116
117 kfree_skb(skb);
118
119 return 0;
120}
121EXPORT_SYMBOL_GPL(qca_send_pre_shutdown_cmd);
122
102static void qca_tlv_check_data(struct rome_config *config, 123static void qca_tlv_check_data(struct rome_config *config,
103 const struct firmware *fw) 124 const struct firmware *fw)
104{ 125{
@@ -119,6 +140,7 @@ static void qca_tlv_check_data(struct rome_config *config,
119 BT_DBG("Length\t\t : %d bytes", length); 140 BT_DBG("Length\t\t : %d bytes", length);
120 141
121 config->dnld_mode = ROME_SKIP_EVT_NONE; 142 config->dnld_mode = ROME_SKIP_EVT_NONE;
143 config->dnld_type = ROME_SKIP_EVT_NONE;
122 144
123 switch (config->type) { 145 switch (config->type) {
124 case TLV_TYPE_PATCH: 146 case TLV_TYPE_PATCH:
@@ -268,7 +290,7 @@ static int qca_inject_cmd_complete_event(struct hci_dev *hdev)
268 290
269 evt = skb_put(skb, sizeof(*evt)); 291 evt = skb_put(skb, sizeof(*evt));
270 evt->ncmd = 1; 292 evt->ncmd = 1;
271 evt->opcode = QCA_HCI_CC_OPCODE; 293 evt->opcode = cpu_to_le16(QCA_HCI_CC_OPCODE);
272 294
273 skb_put_u8(skb, QCA_HCI_CC_SUCCESS); 295 skb_put_u8(skb, QCA_HCI_CC_SUCCESS);
274 296
@@ -323,7 +345,7 @@ static int qca_download_firmware(struct hci_dev *hdev,
323 */ 345 */
324 if (config->dnld_type == ROME_SKIP_EVT_VSE_CC || 346 if (config->dnld_type == ROME_SKIP_EVT_VSE_CC ||
325 config->dnld_type == ROME_SKIP_EVT_VSE) 347 config->dnld_type == ROME_SKIP_EVT_VSE)
326 return qca_inject_cmd_complete_event(hdev); 348 ret = qca_inject_cmd_complete_event(hdev);
327 349
328out: 350out:
329 release_firmware(fw); 351 release_firmware(fw);
@@ -388,6 +410,9 @@ int qca_uart_setup(struct hci_dev *hdev, uint8_t baudrate,
388 return err; 410 return err;
389 } 411 }
390 412
413 /* Give the controller some time to get ready to receive the NVM */
414 msleep(10);
415
391 /* Download NVM configuration */ 416 /* Download NVM configuration */
392 config.type = TLV_TYPE_NVM; 417 config.type = TLV_TYPE_NVM;
393 if (firmware_name) 418 if (firmware_name)
diff --git a/drivers/bluetooth/btqca.h b/drivers/bluetooth/btqca.h
index 6a291a7a5d96..69c5315a65fd 100644
--- a/drivers/bluetooth/btqca.h
+++ b/drivers/bluetooth/btqca.h
@@ -13,6 +13,7 @@
13#define EDL_PATCH_TLV_REQ_CMD (0x1E) 13#define EDL_PATCH_TLV_REQ_CMD (0x1E)
14#define EDL_NVM_ACCESS_SET_REQ_CMD (0x01) 14#define EDL_NVM_ACCESS_SET_REQ_CMD (0x01)
15#define MAX_SIZE_PER_TLV_SEGMENT (243) 15#define MAX_SIZE_PER_TLV_SEGMENT (243)
16#define QCA_PRE_SHUTDOWN_CMD (0xFC08)
16 17
17#define EDL_CMD_REQ_RES_EVT (0x00) 18#define EDL_CMD_REQ_RES_EVT (0x00)
18#define EDL_PATCH_VER_RES_EVT (0x19) 19#define EDL_PATCH_VER_RES_EVT (0x19)
@@ -135,6 +136,7 @@ int qca_uart_setup(struct hci_dev *hdev, uint8_t baudrate,
135 const char *firmware_name); 136 const char *firmware_name);
136int qca_read_soc_version(struct hci_dev *hdev, u32 *soc_version); 137int qca_read_soc_version(struct hci_dev *hdev, u32 *soc_version);
137int qca_set_bdaddr(struct hci_dev *hdev, const bdaddr_t *bdaddr); 138int qca_set_bdaddr(struct hci_dev *hdev, const bdaddr_t *bdaddr);
139int qca_send_pre_shutdown_cmd(struct hci_dev *hdev);
138static inline bool qca_is_wcn399x(enum qca_btsoc_type soc_type) 140static inline bool qca_is_wcn399x(enum qca_btsoc_type soc_type)
139{ 141{
140 return soc_type == QCA_WCN3990 || soc_type == QCA_WCN3998; 142 return soc_type == QCA_WCN3990 || soc_type == QCA_WCN3998;
@@ -167,4 +169,9 @@ static inline bool qca_is_wcn399x(enum qca_btsoc_type soc_type)
167{ 169{
168 return false; 170 return false;
169} 171}
172
173static inline int qca_send_pre_shutdown_cmd(struct hci_dev *hdev)
174{
175 return -EOPNOTSUPP;
176}
170#endif 177#endif
diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
index 3876fee6ad13..5cf0734eb31b 100644
--- a/drivers/bluetooth/btusb.c
+++ b/drivers/bluetooth/btusb.c
@@ -2762,8 +2762,10 @@ static int btusb_mtk_setup_firmware(struct hci_dev *hdev, const char *fwname)
2762 fw_size = fw->size; 2762 fw_size = fw->size;
2763 2763
2764 /* The size of patch header is 30 bytes, should be skip */ 2764 /* The size of patch header is 30 bytes, should be skip */
2765 if (fw_size < 30) 2765 if (fw_size < 30) {
2766 err = -EINVAL;
2766 goto err_release_fw; 2767 goto err_release_fw;
2768 }
2767 2769
2768 fw_size -= 30; 2770 fw_size -= 30;
2769 fw_ptr += 30; 2771 fw_ptr += 30;
diff --git a/drivers/bluetooth/hci_ath.c b/drivers/bluetooth/hci_ath.c
index a55be205b91a..dbfe34664633 100644
--- a/drivers/bluetooth/hci_ath.c
+++ b/drivers/bluetooth/hci_ath.c
@@ -98,6 +98,9 @@ static int ath_open(struct hci_uart *hu)
98 98
99 BT_DBG("hu %p", hu); 99 BT_DBG("hu %p", hu);
100 100
101 if (!hci_uart_has_flow_control(hu))
102 return -EOPNOTSUPP;
103
101 ath = kzalloc(sizeof(*ath), GFP_KERNEL); 104 ath = kzalloc(sizeof(*ath), GFP_KERNEL);
102 if (!ath) 105 if (!ath)
103 return -ENOMEM; 106 return -ENOMEM;
diff --git a/drivers/bluetooth/hci_bcm.c b/drivers/bluetooth/hci_bcm.c
index 8905ad2edde7..ae2624fce913 100644
--- a/drivers/bluetooth/hci_bcm.c
+++ b/drivers/bluetooth/hci_bcm.c
@@ -406,6 +406,9 @@ static int bcm_open(struct hci_uart *hu)
406 406
407 bt_dev_dbg(hu->hdev, "hu %p", hu); 407 bt_dev_dbg(hu->hdev, "hu %p", hu);
408 408
409 if (!hci_uart_has_flow_control(hu))
410 return -EOPNOTSUPP;
411
409 bcm = kzalloc(sizeof(*bcm), GFP_KERNEL); 412 bcm = kzalloc(sizeof(*bcm), GFP_KERNEL);
410 if (!bcm) 413 if (!bcm)
411 return -ENOMEM; 414 return -ENOMEM;
diff --git a/drivers/bluetooth/hci_intel.c b/drivers/bluetooth/hci_intel.c
index 207bae5e0d46..31f25153087d 100644
--- a/drivers/bluetooth/hci_intel.c
+++ b/drivers/bluetooth/hci_intel.c
@@ -391,6 +391,9 @@ static int intel_open(struct hci_uart *hu)
391 391
392 BT_DBG("hu %p", hu); 392 BT_DBG("hu %p", hu);
393 393
394 if (!hci_uart_has_flow_control(hu))
395 return -EOPNOTSUPP;
396
394 intel = kzalloc(sizeof(*intel), GFP_KERNEL); 397 intel = kzalloc(sizeof(*intel), GFP_KERNEL);
395 if (!intel) 398 if (!intel)
396 return -ENOMEM; 399 return -ENOMEM;
diff --git a/drivers/bluetooth/hci_ldisc.c b/drivers/bluetooth/hci_ldisc.c
index 8950e07889fe..85a30fb9177b 100644
--- a/drivers/bluetooth/hci_ldisc.c
+++ b/drivers/bluetooth/hci_ldisc.c
@@ -292,6 +292,19 @@ static int hci_uart_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
292 return 0; 292 return 0;
293} 293}
294 294
295/* Check the underlying device or tty has flow control support */
296bool hci_uart_has_flow_control(struct hci_uart *hu)
297{
298 /* serdev nodes check if the needed operations are present */
299 if (hu->serdev)
300 return true;
301
302 if (hu->tty->driver->ops->tiocmget && hu->tty->driver->ops->tiocmset)
303 return true;
304
305 return false;
306}
307
295/* Flow control or un-flow control the device */ 308/* Flow control or un-flow control the device */
296void hci_uart_set_flow_control(struct hci_uart *hu, bool enable) 309void hci_uart_set_flow_control(struct hci_uart *hu, bool enable)
297{ 310{
diff --git a/drivers/bluetooth/hci_mrvl.c b/drivers/bluetooth/hci_mrvl.c
index f98e5cc343b2..fbc3f7c3a5c7 100644
--- a/drivers/bluetooth/hci_mrvl.c
+++ b/drivers/bluetooth/hci_mrvl.c
@@ -59,6 +59,9 @@ static int mrvl_open(struct hci_uart *hu)
59 59
60 BT_DBG("hu %p", hu); 60 BT_DBG("hu %p", hu);
61 61
62 if (!hci_uart_has_flow_control(hu))
63 return -EOPNOTSUPP;
64
62 mrvl = kzalloc(sizeof(*mrvl), GFP_KERNEL); 65 mrvl = kzalloc(sizeof(*mrvl), GFP_KERNEL);
63 if (!mrvl) 66 if (!mrvl)
64 return -ENOMEM; 67 return -ENOMEM;
diff --git a/drivers/bluetooth/hci_qca.c b/drivers/bluetooth/hci_qca.c
index 9a5c9c1f9484..9a970fd1975a 100644
--- a/drivers/bluetooth/hci_qca.c
+++ b/drivers/bluetooth/hci_qca.c
@@ -473,6 +473,9 @@ static int qca_open(struct hci_uart *hu)
473 473
474 BT_DBG("hu %p qca_open", hu); 474 BT_DBG("hu %p qca_open", hu);
475 475
476 if (!hci_uart_has_flow_control(hu))
477 return -EOPNOTSUPP;
478
476 qca = kzalloc(sizeof(struct qca_data), GFP_KERNEL); 479 qca = kzalloc(sizeof(struct qca_data), GFP_KERNEL);
477 if (!qca) 480 if (!qca)
478 return -ENOMEM; 481 return -ENOMEM;
@@ -702,7 +705,7 @@ static void device_want_to_sleep(struct hci_uart *hu)
702 unsigned long flags; 705 unsigned long flags;
703 struct qca_data *qca = hu->priv; 706 struct qca_data *qca = hu->priv;
704 707
705 BT_DBG("hu %p want to sleep", hu); 708 BT_DBG("hu %p want to sleep in %d state", hu, qca->rx_ibs_state);
706 709
707 spin_lock_irqsave(&qca->hci_ibs_lock, flags); 710 spin_lock_irqsave(&qca->hci_ibs_lock, flags);
708 711
@@ -717,7 +720,7 @@ static void device_want_to_sleep(struct hci_uart *hu)
717 break; 720 break;
718 721
719 case HCI_IBS_RX_ASLEEP: 722 case HCI_IBS_RX_ASLEEP:
720 /* Fall through */ 723 break;
721 724
722 default: 725 default:
723 /* Any other state is illegal */ 726 /* Any other state is illegal */
@@ -909,7 +912,7 @@ static int qca_recv_event(struct hci_dev *hdev, struct sk_buff *skb)
909 if (hdr->evt == HCI_EV_VENDOR) 912 if (hdr->evt == HCI_EV_VENDOR)
910 complete(&qca->drop_ev_comp); 913 complete(&qca->drop_ev_comp);
911 914
912 kfree(skb); 915 kfree_skb(skb);
913 916
914 return 0; 917 return 0;
915 } 918 }
@@ -1383,6 +1386,9 @@ static int qca_power_off(struct hci_dev *hdev)
1383{ 1386{
1384 struct hci_uart *hu = hci_get_drvdata(hdev); 1387 struct hci_uart *hu = hci_get_drvdata(hdev);
1385 1388
1389 /* Perform pre shutdown command */
1390 qca_send_pre_shutdown_cmd(hdev);
1391
1386 qca_power_shutdown(hu); 1392 qca_power_shutdown(hu);
1387 return 0; 1393 return 0;
1388} 1394}
diff --git a/drivers/bluetooth/hci_uart.h b/drivers/bluetooth/hci_uart.h
index f11af3912ce6..6ab631101019 100644
--- a/drivers/bluetooth/hci_uart.h
+++ b/drivers/bluetooth/hci_uart.h
@@ -104,6 +104,7 @@ int hci_uart_wait_until_sent(struct hci_uart *hu);
104int hci_uart_init_ready(struct hci_uart *hu); 104int hci_uart_init_ready(struct hci_uart *hu);
105void hci_uart_init_work(struct work_struct *work); 105void hci_uart_init_work(struct work_struct *work);
106void hci_uart_set_baudrate(struct hci_uart *hu, unsigned int speed); 106void hci_uart_set_baudrate(struct hci_uart *hu, unsigned int speed);
107bool hci_uart_has_flow_control(struct hci_uart *hu);
107void hci_uart_set_flow_control(struct hci_uart *hu, bool enable); 108void hci_uart_set_flow_control(struct hci_uart *hu, bool enable);
108void hci_uart_set_speeds(struct hci_uart *hu, unsigned int init_speed, 109void hci_uart_set_speeds(struct hci_uart *hu, unsigned int init_speed,
109 unsigned int oper_speed); 110 unsigned int oper_speed);
diff --git a/drivers/bus/hisi_lpc.c b/drivers/bus/hisi_lpc.c
index 19d7b6ff2f17..20c957185af2 100644
--- a/drivers/bus/hisi_lpc.c
+++ b/drivers/bus/hisi_lpc.c
@@ -456,6 +456,17 @@ struct hisi_lpc_acpi_cell {
456 size_t pdata_size; 456 size_t pdata_size;
457}; 457};
458 458
459static void hisi_lpc_acpi_remove(struct device *hostdev)
460{
461 struct acpi_device *adev = ACPI_COMPANION(hostdev);
462 struct acpi_device *child;
463
464 device_for_each_child(hostdev, NULL, hisi_lpc_acpi_remove_subdev);
465
466 list_for_each_entry(child, &adev->children, node)
467 acpi_device_clear_enumerated(child);
468}
469
459/* 470/*
460 * hisi_lpc_acpi_probe - probe children for ACPI FW 471 * hisi_lpc_acpi_probe - probe children for ACPI FW
461 * @hostdev: LPC host device pointer 472 * @hostdev: LPC host device pointer
@@ -555,8 +566,7 @@ static int hisi_lpc_acpi_probe(struct device *hostdev)
555 return 0; 566 return 0;
556 567
557fail: 568fail:
558 device_for_each_child(hostdev, NULL, 569 hisi_lpc_acpi_remove(hostdev);
559 hisi_lpc_acpi_remove_subdev);
560 return ret; 570 return ret;
561} 571}
562 572
@@ -569,6 +579,10 @@ static int hisi_lpc_acpi_probe(struct device *dev)
569{ 579{
570 return -ENODEV; 580 return -ENODEV;
571} 581}
582
583static void hisi_lpc_acpi_remove(struct device *hostdev)
584{
585}
572#endif // CONFIG_ACPI 586#endif // CONFIG_ACPI
573 587
574/* 588/*
@@ -606,24 +620,27 @@ static int hisi_lpc_probe(struct platform_device *pdev)
606 range->fwnode = dev->fwnode; 620 range->fwnode = dev->fwnode;
607 range->flags = LOGIC_PIO_INDIRECT; 621 range->flags = LOGIC_PIO_INDIRECT;
608 range->size = PIO_INDIRECT_SIZE; 622 range->size = PIO_INDIRECT_SIZE;
623 range->hostdata = lpcdev;
624 range->ops = &hisi_lpc_ops;
625 lpcdev->io_host = range;
609 626
610 ret = logic_pio_register_range(range); 627 ret = logic_pio_register_range(range);
611 if (ret) { 628 if (ret) {
612 dev_err(dev, "register IO range failed (%d)!\n", ret); 629 dev_err(dev, "register IO range failed (%d)!\n", ret);
613 return ret; 630 return ret;
614 } 631 }
615 lpcdev->io_host = range;
616 632
617 /* register the LPC host PIO resources */ 633 /* register the LPC host PIO resources */
618 if (acpi_device) 634 if (acpi_device)
619 ret = hisi_lpc_acpi_probe(dev); 635 ret = hisi_lpc_acpi_probe(dev);
620 else 636 else
621 ret = of_platform_populate(dev->of_node, NULL, NULL, dev); 637 ret = of_platform_populate(dev->of_node, NULL, NULL, dev);
622 if (ret) 638 if (ret) {
639 logic_pio_unregister_range(range);
623 return ret; 640 return ret;
641 }
624 642
625 lpcdev->io_host->hostdata = lpcdev; 643 dev_set_drvdata(dev, lpcdev);
626 lpcdev->io_host->ops = &hisi_lpc_ops;
627 644
628 io_end = lpcdev->io_host->io_start + lpcdev->io_host->size; 645 io_end = lpcdev->io_host->io_start + lpcdev->io_host->size;
629 dev_info(dev, "registered range [%pa - %pa]\n", 646 dev_info(dev, "registered range [%pa - %pa]\n",
@@ -632,6 +649,23 @@ static int hisi_lpc_probe(struct platform_device *pdev)
632 return ret; 649 return ret;
633} 650}
634 651
652static int hisi_lpc_remove(struct platform_device *pdev)
653{
654 struct device *dev = &pdev->dev;
655 struct acpi_device *acpi_device = ACPI_COMPANION(dev);
656 struct hisi_lpc_dev *lpcdev = dev_get_drvdata(dev);
657 struct logic_pio_hwaddr *range = lpcdev->io_host;
658
659 if (acpi_device)
660 hisi_lpc_acpi_remove(dev);
661 else
662 of_platform_depopulate(dev);
663
664 logic_pio_unregister_range(range);
665
666 return 0;
667}
668
635static const struct of_device_id hisi_lpc_of_match[] = { 669static const struct of_device_id hisi_lpc_of_match[] = {
636 { .compatible = "hisilicon,hip06-lpc", }, 670 { .compatible = "hisilicon,hip06-lpc", },
637 { .compatible = "hisilicon,hip07-lpc", }, 671 { .compatible = "hisilicon,hip07-lpc", },
@@ -645,5 +679,6 @@ static struct platform_driver hisi_lpc_driver = {
645 .acpi_match_table = ACPI_PTR(hisi_lpc_acpi_match), 679 .acpi_match_table = ACPI_PTR(hisi_lpc_acpi_match),
646 }, 680 },
647 .probe = hisi_lpc_probe, 681 .probe = hisi_lpc_probe,
682 .remove = hisi_lpc_remove,
648}; 683};
649builtin_platform_driver(hisi_lpc_driver); 684builtin_platform_driver(hisi_lpc_driver);
diff --git a/drivers/bus/ti-sysc.c b/drivers/bus/ti-sysc.c
index e6deabd8305d..2db474ab4c6b 100644
--- a/drivers/bus/ti-sysc.c
+++ b/drivers/bus/ti-sysc.c
@@ -949,7 +949,7 @@ static int sysc_best_idle_mode(u32 idlemodes, u32 *best_mode)
949 *best_mode = SYSC_IDLE_SMART_WKUP; 949 *best_mode = SYSC_IDLE_SMART_WKUP;
950 else if (idlemodes & BIT(SYSC_IDLE_SMART)) 950 else if (idlemodes & BIT(SYSC_IDLE_SMART))
951 *best_mode = SYSC_IDLE_SMART; 951 *best_mode = SYSC_IDLE_SMART;
952 else if (idlemodes & SYSC_IDLE_FORCE) 952 else if (idlemodes & BIT(SYSC_IDLE_FORCE))
953 *best_mode = SYSC_IDLE_FORCE; 953 *best_mode = SYSC_IDLE_FORCE;
954 else 954 else
955 return -EINVAL; 955 return -EINVAL;
@@ -1267,7 +1267,8 @@ static const struct sysc_revision_quirk sysc_revision_quirks[] = {
1267 SYSC_QUIRK("control", 0, 0, 0x10, -1, 0x40000900, 0xffffffff, 0), 1267 SYSC_QUIRK("control", 0, 0, 0x10, -1, 0x40000900, 0xffffffff, 0),
1268 SYSC_QUIRK("cpgmac", 0, 0x1200, 0x1208, 0x1204, 0x4edb1902, 1268 SYSC_QUIRK("cpgmac", 0, 0x1200, 0x1208, 0x1204, 0x4edb1902,
1269 0xffff00f0, 0), 1269 0xffff00f0, 0),
1270 SYSC_QUIRK("dcan", 0, 0, -1, -1, 0xffffffff, 0xffffffff, 0), 1270 SYSC_QUIRK("dcan", 0, 0x20, -1, -1, 0xa3170504, 0xffffffff, 0),
1271 SYSC_QUIRK("dcan", 0, 0x20, -1, -1, 0x4edb1902, 0xffffffff, 0),
1271 SYSC_QUIRK("dmic", 0, 0, 0x10, -1, 0x50010000, 0xffffffff, 0), 1272 SYSC_QUIRK("dmic", 0, 0, 0x10, -1, 0x50010000, 0xffffffff, 0),
1272 SYSC_QUIRK("dwc3", 0, 0, 0x10, -1, 0x500a0200, 0xffffffff, 0), 1273 SYSC_QUIRK("dwc3", 0, 0, 0x10, -1, 0x500a0200, 0xffffffff, 0),
1273 SYSC_QUIRK("epwmss", 0, 0, 0x4, -1, 0x47400001, 0xffffffff, 0), 1274 SYSC_QUIRK("epwmss", 0, 0, 0x4, -1, 0x47400001, 0xffffffff, 0),
@@ -1692,10 +1693,7 @@ static int sysc_init_sysc_mask(struct sysc *ddata)
1692 if (error) 1693 if (error)
1693 return 0; 1694 return 0;
1694 1695
1695 if (val) 1696 ddata->cfg.sysc_val = val & ddata->cap->sysc_mask;
1696 ddata->cfg.sysc_val = val & ddata->cap->sysc_mask;
1697 else
1698 ddata->cfg.sysc_val = ddata->cap->sysc_mask;
1699 1697
1700 return 0; 1698 return 0;
1701} 1699}
@@ -2385,27 +2383,27 @@ static int sysc_probe(struct platform_device *pdev)
2385 2383
2386 error = sysc_init_dts_quirks(ddata); 2384 error = sysc_init_dts_quirks(ddata);
2387 if (error) 2385 if (error)
2388 goto unprepare; 2386 return error;
2389 2387
2390 error = sysc_map_and_check_registers(ddata); 2388 error = sysc_map_and_check_registers(ddata);
2391 if (error) 2389 if (error)
2392 goto unprepare; 2390 return error;
2393 2391
2394 error = sysc_init_sysc_mask(ddata); 2392 error = sysc_init_sysc_mask(ddata);
2395 if (error) 2393 if (error)
2396 goto unprepare; 2394 return error;
2397 2395
2398 error = sysc_init_idlemodes(ddata); 2396 error = sysc_init_idlemodes(ddata);
2399 if (error) 2397 if (error)
2400 goto unprepare; 2398 return error;
2401 2399
2402 error = sysc_init_syss_mask(ddata); 2400 error = sysc_init_syss_mask(ddata);
2403 if (error) 2401 if (error)
2404 goto unprepare; 2402 return error;
2405 2403
2406 error = sysc_init_pdata(ddata); 2404 error = sysc_init_pdata(ddata);
2407 if (error) 2405 if (error)
2408 goto unprepare; 2406 return error;
2409 2407
2410 sysc_init_early_quirks(ddata); 2408 sysc_init_early_quirks(ddata);
2411 2409
@@ -2415,7 +2413,7 @@ static int sysc_probe(struct platform_device *pdev)
2415 2413
2416 error = sysc_init_resets(ddata); 2414 error = sysc_init_resets(ddata);
2417 if (error) 2415 if (error)
2418 return error; 2416 goto unprepare;
2419 2417
2420 error = sysc_init_module(ddata); 2418 error = sysc_init_module(ddata);
2421 if (error) 2419 if (error)
diff --git a/drivers/char/hpet.c b/drivers/char/hpet.c
index 5c39f20378b8..9ac6671bb514 100644
--- a/drivers/char/hpet.c
+++ b/drivers/char/hpet.c
@@ -567,8 +567,7 @@ static inline unsigned long hpet_time_div(struct hpets *hpets,
567 unsigned long long m; 567 unsigned long long m;
568 568
569 m = hpets->hp_tick_freq + (dis >> 1); 569 m = hpets->hp_tick_freq + (dis >> 1);
570 do_div(m, dis); 570 return div64_ul(m, dis);
571 return (unsigned long)m;
572} 571}
573 572
574static int 573static int
diff --git a/drivers/char/ipmi/ipmb_dev_int.c b/drivers/char/ipmi/ipmb_dev_int.c
index 57204335c5f5..285e0b8f9a97 100644
--- a/drivers/char/ipmi/ipmb_dev_int.c
+++ b/drivers/char/ipmi/ipmb_dev_int.c
@@ -76,7 +76,7 @@ static ssize_t ipmb_read(struct file *file, char __user *buf, size_t count,
76 struct ipmb_dev *ipmb_dev = to_ipmb_dev(file); 76 struct ipmb_dev *ipmb_dev = to_ipmb_dev(file);
77 struct ipmb_request_elem *queue_elem; 77 struct ipmb_request_elem *queue_elem;
78 struct ipmb_msg msg; 78 struct ipmb_msg msg;
79 ssize_t ret; 79 ssize_t ret = 0;
80 80
81 memset(&msg, 0, sizeof(msg)); 81 memset(&msg, 0, sizeof(msg));
82 82
diff --git a/drivers/char/tpm/tpm-chip.c b/drivers/char/tpm/tpm-chip.c
index d47ad10a35fe..4838c6a9f0f2 100644
--- a/drivers/char/tpm/tpm-chip.c
+++ b/drivers/char/tpm/tpm-chip.c
@@ -77,6 +77,18 @@ static int tpm_go_idle(struct tpm_chip *chip)
77 return chip->ops->go_idle(chip); 77 return chip->ops->go_idle(chip);
78} 78}
79 79
80static void tpm_clk_enable(struct tpm_chip *chip)
81{
82 if (chip->ops->clk_enable)
83 chip->ops->clk_enable(chip, true);
84}
85
86static void tpm_clk_disable(struct tpm_chip *chip)
87{
88 if (chip->ops->clk_enable)
89 chip->ops->clk_enable(chip, false);
90}
91
80/** 92/**
81 * tpm_chip_start() - power on the TPM 93 * tpm_chip_start() - power on the TPM
82 * @chip: a TPM chip to use 94 * @chip: a TPM chip to use
@@ -89,13 +101,12 @@ int tpm_chip_start(struct tpm_chip *chip)
89{ 101{
90 int ret; 102 int ret;
91 103
92 if (chip->ops->clk_enable) 104 tpm_clk_enable(chip);
93 chip->ops->clk_enable(chip, true);
94 105
95 if (chip->locality == -1) { 106 if (chip->locality == -1) {
96 ret = tpm_request_locality(chip); 107 ret = tpm_request_locality(chip);
97 if (ret) { 108 if (ret) {
98 chip->ops->clk_enable(chip, false); 109 tpm_clk_disable(chip);
99 return ret; 110 return ret;
100 } 111 }
101 } 112 }
@@ -103,8 +114,7 @@ int tpm_chip_start(struct tpm_chip *chip)
103 ret = tpm_cmd_ready(chip); 114 ret = tpm_cmd_ready(chip);
104 if (ret) { 115 if (ret) {
105 tpm_relinquish_locality(chip); 116 tpm_relinquish_locality(chip);
106 if (chip->ops->clk_enable) 117 tpm_clk_disable(chip);
107 chip->ops->clk_enable(chip, false);
108 return ret; 118 return ret;
109 } 119 }
110 120
@@ -124,8 +134,7 @@ void tpm_chip_stop(struct tpm_chip *chip)
124{ 134{
125 tpm_go_idle(chip); 135 tpm_go_idle(chip);
126 tpm_relinquish_locality(chip); 136 tpm_relinquish_locality(chip);
127 if (chip->ops->clk_enable) 137 tpm_clk_disable(chip);
128 chip->ops->clk_enable(chip, false);
129} 138}
130EXPORT_SYMBOL_GPL(tpm_chip_stop); 139EXPORT_SYMBOL_GPL(tpm_chip_stop);
131 140
@@ -545,6 +554,20 @@ static int tpm_add_hwrng(struct tpm_chip *chip)
545 return hwrng_register(&chip->hwrng); 554 return hwrng_register(&chip->hwrng);
546} 555}
547 556
557static int tpm_get_pcr_allocation(struct tpm_chip *chip)
558{
559 int rc;
560
561 rc = (chip->flags & TPM_CHIP_FLAG_TPM2) ?
562 tpm2_get_pcr_allocation(chip) :
563 tpm1_get_pcr_allocation(chip);
564
565 if (rc > 0)
566 return -ENODEV;
567
568 return rc;
569}
570
548/* 571/*
549 * tpm_chip_register() - create a character device for the TPM chip 572 * tpm_chip_register() - create a character device for the TPM chip
550 * @chip: TPM chip to use. 573 * @chip: TPM chip to use.
@@ -564,6 +587,12 @@ int tpm_chip_register(struct tpm_chip *chip)
564 if (rc) 587 if (rc)
565 return rc; 588 return rc;
566 rc = tpm_auto_startup(chip); 589 rc = tpm_auto_startup(chip);
590 if (rc) {
591 tpm_chip_stop(chip);
592 return rc;
593 }
594
595 rc = tpm_get_pcr_allocation(chip);
567 tpm_chip_stop(chip); 596 tpm_chip_stop(chip);
568 if (rc) 597 if (rc)
569 return rc; 598 return rc;
diff --git a/drivers/char/tpm/tpm.h b/drivers/char/tpm/tpm.h
index e503ffc3aa39..a7fea3e0ca86 100644
--- a/drivers/char/tpm/tpm.h
+++ b/drivers/char/tpm/tpm.h
@@ -394,6 +394,7 @@ int tpm1_pcr_read(struct tpm_chip *chip, u32 pcr_idx, u8 *res_buf);
394ssize_t tpm1_getcap(struct tpm_chip *chip, u32 subcap_id, cap_t *cap, 394ssize_t tpm1_getcap(struct tpm_chip *chip, u32 subcap_id, cap_t *cap,
395 const char *desc, size_t min_cap_length); 395 const char *desc, size_t min_cap_length);
396int tpm1_get_random(struct tpm_chip *chip, u8 *out, size_t max); 396int tpm1_get_random(struct tpm_chip *chip, u8 *out, size_t max);
397int tpm1_get_pcr_allocation(struct tpm_chip *chip);
397unsigned long tpm_calc_ordinal_duration(struct tpm_chip *chip, u32 ordinal); 398unsigned long tpm_calc_ordinal_duration(struct tpm_chip *chip, u32 ordinal);
398int tpm_pm_suspend(struct device *dev); 399int tpm_pm_suspend(struct device *dev);
399int tpm_pm_resume(struct device *dev); 400int tpm_pm_resume(struct device *dev);
@@ -449,6 +450,7 @@ int tpm2_unseal_trusted(struct tpm_chip *chip,
449ssize_t tpm2_get_tpm_pt(struct tpm_chip *chip, u32 property_id, 450ssize_t tpm2_get_tpm_pt(struct tpm_chip *chip, u32 property_id,
450 u32 *value, const char *desc); 451 u32 *value, const char *desc);
451 452
453ssize_t tpm2_get_pcr_allocation(struct tpm_chip *chip);
452int tpm2_auto_startup(struct tpm_chip *chip); 454int tpm2_auto_startup(struct tpm_chip *chip);
453void tpm2_shutdown(struct tpm_chip *chip, u16 shutdown_type); 455void tpm2_shutdown(struct tpm_chip *chip, u16 shutdown_type);
454unsigned long tpm2_calc_ordinal_duration(struct tpm_chip *chip, u32 ordinal); 456unsigned long tpm2_calc_ordinal_duration(struct tpm_chip *chip, u32 ordinal);
diff --git a/drivers/char/tpm/tpm1-cmd.c b/drivers/char/tpm/tpm1-cmd.c
index faacbe1ffa1a..149e953ca369 100644
--- a/drivers/char/tpm/tpm1-cmd.c
+++ b/drivers/char/tpm/tpm1-cmd.c
@@ -699,18 +699,6 @@ int tpm1_auto_startup(struct tpm_chip *chip)
699 goto out; 699 goto out;
700 } 700 }
701 701
702 chip->allocated_banks = kcalloc(1, sizeof(*chip->allocated_banks),
703 GFP_KERNEL);
704 if (!chip->allocated_banks) {
705 rc = -ENOMEM;
706 goto out;
707 }
708
709 chip->allocated_banks[0].alg_id = TPM_ALG_SHA1;
710 chip->allocated_banks[0].digest_size = hash_digest_size[HASH_ALGO_SHA1];
711 chip->allocated_banks[0].crypto_id = HASH_ALGO_SHA1;
712 chip->nr_allocated_banks = 1;
713
714 return rc; 702 return rc;
715out: 703out:
716 if (rc > 0) 704 if (rc > 0)
@@ -779,3 +767,27 @@ int tpm1_pm_suspend(struct tpm_chip *chip, u32 tpm_suspend_pcr)
779 return rc; 767 return rc;
780} 768}
781 769
770/**
771 * tpm1_get_pcr_allocation() - initialize the allocated bank
772 * @chip: TPM chip to use.
773 *
774 * The function initializes the SHA1 allocated bank to extend PCR
775 *
776 * Return:
777 * * 0 on success,
778 * * < 0 on error.
779 */
780int tpm1_get_pcr_allocation(struct tpm_chip *chip)
781{
782 chip->allocated_banks = kcalloc(1, sizeof(*chip->allocated_banks),
783 GFP_KERNEL);
784 if (!chip->allocated_banks)
785 return -ENOMEM;
786
787 chip->allocated_banks[0].alg_id = TPM_ALG_SHA1;
788 chip->allocated_banks[0].digest_size = hash_digest_size[HASH_ALGO_SHA1];
789 chip->allocated_banks[0].crypto_id = HASH_ALGO_SHA1;
790 chip->nr_allocated_banks = 1;
791
792 return 0;
793}
diff --git a/drivers/char/tpm/tpm2-cmd.c b/drivers/char/tpm/tpm2-cmd.c
index d103545e4055..ba9acae83bff 100644
--- a/drivers/char/tpm/tpm2-cmd.c
+++ b/drivers/char/tpm/tpm2-cmd.c
@@ -840,7 +840,7 @@ struct tpm2_pcr_selection {
840 u8 pcr_select[3]; 840 u8 pcr_select[3];
841} __packed; 841} __packed;
842 842
843static ssize_t tpm2_get_pcr_allocation(struct tpm_chip *chip) 843ssize_t tpm2_get_pcr_allocation(struct tpm_chip *chip)
844{ 844{
845 struct tpm2_pcr_selection pcr_selection; 845 struct tpm2_pcr_selection pcr_selection;
846 struct tpm_buf buf; 846 struct tpm_buf buf;
@@ -1040,10 +1040,6 @@ int tpm2_auto_startup(struct tpm_chip *chip)
1040 goto out; 1040 goto out;
1041 } 1041 }
1042 1042
1043 rc = tpm2_get_pcr_allocation(chip);
1044 if (rc)
1045 goto out;
1046
1047 rc = tpm2_get_cc_attrs_tbl(chip); 1043 rc = tpm2_get_cc_attrs_tbl(chip);
1048 1044
1049out: 1045out:
diff --git a/drivers/clk/at91/clk-generated.c b/drivers/clk/at91/clk-generated.c
index 44db83a6d01c..44a46dcc0518 100644
--- a/drivers/clk/at91/clk-generated.c
+++ b/drivers/clk/at91/clk-generated.c
@@ -141,6 +141,8 @@ static int clk_generated_determine_rate(struct clk_hw *hw,
141 continue; 141 continue;
142 142
143 div = DIV_ROUND_CLOSEST(parent_rate, req->rate); 143 div = DIV_ROUND_CLOSEST(parent_rate, req->rate);
144 if (div > GENERATED_MAX_DIV + 1)
145 div = GENERATED_MAX_DIV + 1;
144 146
145 clk_generated_best_diff(req, parent, parent_rate, div, 147 clk_generated_best_diff(req, parent, parent_rate, div,
146 &best_diff, &best_rate); 148 &best_diff, &best_rate);
diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c
index c0990703ce54..1c46babeb093 100644
--- a/drivers/clk/clk.c
+++ b/drivers/clk/clk.c
@@ -324,6 +324,25 @@ static struct clk_core *clk_core_lookup(const char *name)
324 return NULL; 324 return NULL;
325} 325}
326 326
327#ifdef CONFIG_OF
328static int of_parse_clkspec(const struct device_node *np, int index,
329 const char *name, struct of_phandle_args *out_args);
330static struct clk_hw *
331of_clk_get_hw_from_clkspec(struct of_phandle_args *clkspec);
332#else
333static inline int of_parse_clkspec(const struct device_node *np, int index,
334 const char *name,
335 struct of_phandle_args *out_args)
336{
337 return -ENOENT;
338}
339static inline struct clk_hw *
340of_clk_get_hw_from_clkspec(struct of_phandle_args *clkspec)
341{
342 return ERR_PTR(-ENOENT);
343}
344#endif
345
327/** 346/**
328 * clk_core_get - Find the clk_core parent of a clk 347 * clk_core_get - Find the clk_core parent of a clk
329 * @core: clk to find parent of 348 * @core: clk to find parent of
@@ -355,8 +374,9 @@ static struct clk_core *clk_core_lookup(const char *name)
355 * }; 374 * };
356 * 375 *
357 * Returns: -ENOENT when the provider can't be found or the clk doesn't 376 * Returns: -ENOENT when the provider can't be found or the clk doesn't
358 * exist in the provider. -EINVAL when the name can't be found. NULL when the 377 * exist in the provider or the name can't be found in the DT node or
359 * provider knows about the clk but it isn't provided on this system. 378 * in a clkdev lookup. NULL when the provider knows about the clk but it
379 * isn't provided on this system.
360 * A valid clk_core pointer when the clk can be found in the provider. 380 * A valid clk_core pointer when the clk can be found in the provider.
361 */ 381 */
362static struct clk_core *clk_core_get(struct clk_core *core, u8 p_index) 382static struct clk_core *clk_core_get(struct clk_core *core, u8 p_index)
@@ -367,17 +387,19 @@ static struct clk_core *clk_core_get(struct clk_core *core, u8 p_index)
367 struct device *dev = core->dev; 387 struct device *dev = core->dev;
368 const char *dev_id = dev ? dev_name(dev) : NULL; 388 const char *dev_id = dev ? dev_name(dev) : NULL;
369 struct device_node *np = core->of_node; 389 struct device_node *np = core->of_node;
390 struct of_phandle_args clkspec;
370 391
371 if (np && (name || index >= 0)) 392 if (np && (name || index >= 0) &&
372 hw = of_clk_get_hw(np, index, name); 393 !of_parse_clkspec(np, index, name, &clkspec)) {
373 394 hw = of_clk_get_hw_from_clkspec(&clkspec);
374 /* 395 of_node_put(clkspec.np);
375 * If the DT search above couldn't find the provider or the provider 396 } else if (name) {
376 * didn't know about this clk, fallback to looking up via clkdev based 397 /*
377 * clk_lookups 398 * If the DT search above couldn't find the provider fallback to
378 */ 399 * looking up via clkdev based clk_lookups.
379 if (PTR_ERR(hw) == -ENOENT && name) 400 */
380 hw = clk_find_hw(dev_id, name); 401 hw = clk_find_hw(dev_id, name);
402 }
381 403
382 if (IS_ERR(hw)) 404 if (IS_ERR(hw))
383 return ERR_CAST(hw); 405 return ERR_CAST(hw);
@@ -401,7 +423,7 @@ static void clk_core_fill_parent_index(struct clk_core *core, u8 index)
401 parent = ERR_PTR(-EPROBE_DEFER); 423 parent = ERR_PTR(-EPROBE_DEFER);
402 } else { 424 } else {
403 parent = clk_core_get(core, index); 425 parent = clk_core_get(core, index);
404 if (IS_ERR(parent) && PTR_ERR(parent) == -ENOENT) 426 if (IS_ERR(parent) && PTR_ERR(parent) == -ENOENT && entry->name)
405 parent = clk_core_lookup(entry->name); 427 parent = clk_core_lookup(entry->name);
406 } 428 }
407 429
@@ -1632,7 +1654,8 @@ static int clk_fetch_parent_index(struct clk_core *core,
1632 break; 1654 break;
1633 1655
1634 /* Fallback to comparing globally unique names */ 1656 /* Fallback to comparing globally unique names */
1635 if (!strcmp(parent->name, core->parents[i].name)) 1657 if (core->parents[i].name &&
1658 !strcmp(parent->name, core->parents[i].name))
1636 break; 1659 break;
1637 } 1660 }
1638 1661
diff --git a/drivers/clk/mediatek/clk-mt8183.c b/drivers/clk/mediatek/clk-mt8183.c
index 1aa5f4059251..73b7e238eee7 100644
--- a/drivers/clk/mediatek/clk-mt8183.c
+++ b/drivers/clk/mediatek/clk-mt8183.c
@@ -25,9 +25,11 @@ static const struct mtk_fixed_clk top_fixed_clks[] = {
25 FIXED_CLK(CLK_TOP_UNIVP_192M, "univpll_192m", "univpll", 192000000), 25 FIXED_CLK(CLK_TOP_UNIVP_192M, "univpll_192m", "univpll", 192000000),
26}; 26};
27 27
28static const struct mtk_fixed_factor top_early_divs[] = {
29 FACTOR(CLK_TOP_CLK13M, "clk13m", "clk26m", 1, 2),
30};
31
28static const struct mtk_fixed_factor top_divs[] = { 32static const struct mtk_fixed_factor top_divs[] = {
29 FACTOR(CLK_TOP_CLK13M, "clk13m", "clk26m", 1,
30 2),
31 FACTOR(CLK_TOP_F26M_CK_D2, "csw_f26m_ck_d2", "clk26m", 1, 33 FACTOR(CLK_TOP_F26M_CK_D2, "csw_f26m_ck_d2", "clk26m", 1,
32 2), 34 2),
33 FACTOR(CLK_TOP_SYSPLL_CK, "syspll_ck", "mainpll", 1, 35 FACTOR(CLK_TOP_SYSPLL_CK, "syspll_ck", "mainpll", 1,
@@ -1148,37 +1150,57 @@ static int clk_mt8183_apmixed_probe(struct platform_device *pdev)
1148 return of_clk_add_provider(node, of_clk_src_onecell_get, clk_data); 1150 return of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
1149} 1151}
1150 1152
1153static struct clk_onecell_data *top_clk_data;
1154
1155static void clk_mt8183_top_init_early(struct device_node *node)
1156{
1157 int i;
1158
1159 top_clk_data = mtk_alloc_clk_data(CLK_TOP_NR_CLK);
1160
1161 for (i = 0; i < CLK_TOP_NR_CLK; i++)
1162 top_clk_data->clks[i] = ERR_PTR(-EPROBE_DEFER);
1163
1164 mtk_clk_register_factors(top_early_divs, ARRAY_SIZE(top_early_divs),
1165 top_clk_data);
1166
1167 of_clk_add_provider(node, of_clk_src_onecell_get, top_clk_data);
1168}
1169
1170CLK_OF_DECLARE_DRIVER(mt8183_topckgen, "mediatek,mt8183-topckgen",
1171 clk_mt8183_top_init_early);
1172
1151static int clk_mt8183_top_probe(struct platform_device *pdev) 1173static int clk_mt8183_top_probe(struct platform_device *pdev)
1152{ 1174{
1153 struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 1175 struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1154 void __iomem *base; 1176 void __iomem *base;
1155 struct clk_onecell_data *clk_data;
1156 struct device_node *node = pdev->dev.of_node; 1177 struct device_node *node = pdev->dev.of_node;
1157 1178
1158 base = devm_ioremap_resource(&pdev->dev, res); 1179 base = devm_ioremap_resource(&pdev->dev, res);
1159 if (IS_ERR(base)) 1180 if (IS_ERR(base))
1160 return PTR_ERR(base); 1181 return PTR_ERR(base);
1161 1182
1162 clk_data = mtk_alloc_clk_data(CLK_TOP_NR_CLK);
1163
1164 mtk_clk_register_fixed_clks(top_fixed_clks, ARRAY_SIZE(top_fixed_clks), 1183 mtk_clk_register_fixed_clks(top_fixed_clks, ARRAY_SIZE(top_fixed_clks),
1165 clk_data); 1184 top_clk_data);
1185
1186 mtk_clk_register_factors(top_early_divs, ARRAY_SIZE(top_early_divs),
1187 top_clk_data);
1166 1188
1167 mtk_clk_register_factors(top_divs, ARRAY_SIZE(top_divs), clk_data); 1189 mtk_clk_register_factors(top_divs, ARRAY_SIZE(top_divs), top_clk_data);
1168 1190
1169 mtk_clk_register_muxes(top_muxes, ARRAY_SIZE(top_muxes), 1191 mtk_clk_register_muxes(top_muxes, ARRAY_SIZE(top_muxes),
1170 node, &mt8183_clk_lock, clk_data); 1192 node, &mt8183_clk_lock, top_clk_data);
1171 1193
1172 mtk_clk_register_composites(top_aud_muxes, ARRAY_SIZE(top_aud_muxes), 1194 mtk_clk_register_composites(top_aud_muxes, ARRAY_SIZE(top_aud_muxes),
1173 base, &mt8183_clk_lock, clk_data); 1195 base, &mt8183_clk_lock, top_clk_data);
1174 1196
1175 mtk_clk_register_composites(top_aud_divs, ARRAY_SIZE(top_aud_divs), 1197 mtk_clk_register_composites(top_aud_divs, ARRAY_SIZE(top_aud_divs),
1176 base, &mt8183_clk_lock, clk_data); 1198 base, &mt8183_clk_lock, top_clk_data);
1177 1199
1178 mtk_clk_register_gates(node, top_clks, ARRAY_SIZE(top_clks), 1200 mtk_clk_register_gates(node, top_clks, ARRAY_SIZE(top_clks),
1179 clk_data); 1201 top_clk_data);
1180 1202
1181 return of_clk_add_provider(node, of_clk_src_onecell_get, clk_data); 1203 return of_clk_add_provider(node, of_clk_src_onecell_get, top_clk_data);
1182} 1204}
1183 1205
1184static int clk_mt8183_infra_probe(struct platform_device *pdev) 1206static int clk_mt8183_infra_probe(struct platform_device *pdev)
diff --git a/drivers/clk/renesas/renesas-cpg-mssr.c b/drivers/clk/renesas/renesas-cpg-mssr.c
index 52bbb9ce3807..d4075b130674 100644
--- a/drivers/clk/renesas/renesas-cpg-mssr.c
+++ b/drivers/clk/renesas/renesas-cpg-mssr.c
@@ -572,17 +572,11 @@ static int cpg_mssr_reset(struct reset_controller_dev *rcdev,
572 unsigned int reg = id / 32; 572 unsigned int reg = id / 32;
573 unsigned int bit = id % 32; 573 unsigned int bit = id % 32;
574 u32 bitmask = BIT(bit); 574 u32 bitmask = BIT(bit);
575 unsigned long flags;
576 u32 value;
577 575
578 dev_dbg(priv->dev, "reset %u%02u\n", reg, bit); 576 dev_dbg(priv->dev, "reset %u%02u\n", reg, bit);
579 577
580 /* Reset module */ 578 /* Reset module */
581 spin_lock_irqsave(&priv->rmw_lock, flags); 579 writel(bitmask, priv->base + SRCR(reg));
582 value = readl(priv->base + SRCR(reg));
583 value |= bitmask;
584 writel(value, priv->base + SRCR(reg));
585 spin_unlock_irqrestore(&priv->rmw_lock, flags);
586 580
587 /* Wait for at least one cycle of the RCLK clock (@ ca. 32 kHz) */ 581 /* Wait for at least one cycle of the RCLK clock (@ ca. 32 kHz) */
588 udelay(35); 582 udelay(35);
@@ -599,16 +593,10 @@ static int cpg_mssr_assert(struct reset_controller_dev *rcdev, unsigned long id)
599 unsigned int reg = id / 32; 593 unsigned int reg = id / 32;
600 unsigned int bit = id % 32; 594 unsigned int bit = id % 32;
601 u32 bitmask = BIT(bit); 595 u32 bitmask = BIT(bit);
602 unsigned long flags;
603 u32 value;
604 596
605 dev_dbg(priv->dev, "assert %u%02u\n", reg, bit); 597 dev_dbg(priv->dev, "assert %u%02u\n", reg, bit);
606 598
607 spin_lock_irqsave(&priv->rmw_lock, flags); 599 writel(bitmask, priv->base + SRCR(reg));
608 value = readl(priv->base + SRCR(reg));
609 value |= bitmask;
610 writel(value, priv->base + SRCR(reg));
611 spin_unlock_irqrestore(&priv->rmw_lock, flags);
612 return 0; 600 return 0;
613} 601}
614 602
diff --git a/drivers/clk/samsung/clk-exynos5-subcmu.c b/drivers/clk/samsung/clk-exynos5-subcmu.c
index 91db7894125d..65c82d922b05 100644
--- a/drivers/clk/samsung/clk-exynos5-subcmu.c
+++ b/drivers/clk/samsung/clk-exynos5-subcmu.c
@@ -14,7 +14,7 @@
14#include "clk-exynos5-subcmu.h" 14#include "clk-exynos5-subcmu.h"
15 15
16static struct samsung_clk_provider *ctx; 16static struct samsung_clk_provider *ctx;
17static const struct exynos5_subcmu_info *cmu; 17static const struct exynos5_subcmu_info **cmu;
18static int nr_cmus; 18static int nr_cmus;
19 19
20static void exynos5_subcmu_clk_save(void __iomem *base, 20static void exynos5_subcmu_clk_save(void __iomem *base,
@@ -56,17 +56,17 @@ static void exynos5_subcmu_defer_gate(struct samsung_clk_provider *ctx,
56 * when OF-core populates all device-tree nodes. 56 * when OF-core populates all device-tree nodes.
57 */ 57 */
58void exynos5_subcmus_init(struct samsung_clk_provider *_ctx, int _nr_cmus, 58void exynos5_subcmus_init(struct samsung_clk_provider *_ctx, int _nr_cmus,
59 const struct exynos5_subcmu_info *_cmu) 59 const struct exynos5_subcmu_info **_cmu)
60{ 60{
61 ctx = _ctx; 61 ctx = _ctx;
62 cmu = _cmu; 62 cmu = _cmu;
63 nr_cmus = _nr_cmus; 63 nr_cmus = _nr_cmus;
64 64
65 for (; _nr_cmus--; _cmu++) { 65 for (; _nr_cmus--; _cmu++) {
66 exynos5_subcmu_defer_gate(ctx, _cmu->gate_clks, 66 exynos5_subcmu_defer_gate(ctx, (*_cmu)->gate_clks,
67 _cmu->nr_gate_clks); 67 (*_cmu)->nr_gate_clks);
68 exynos5_subcmu_clk_save(ctx->reg_base, _cmu->suspend_regs, 68 exynos5_subcmu_clk_save(ctx->reg_base, (*_cmu)->suspend_regs,
69 _cmu->nr_suspend_regs); 69 (*_cmu)->nr_suspend_regs);
70 } 70 }
71} 71}
72 72
@@ -163,9 +163,9 @@ static int __init exynos5_clk_probe(struct platform_device *pdev)
163 if (of_property_read_string(np, "label", &name) < 0) 163 if (of_property_read_string(np, "label", &name) < 0)
164 continue; 164 continue;
165 for (i = 0; i < nr_cmus; i++) 165 for (i = 0; i < nr_cmus; i++)
166 if (strcmp(cmu[i].pd_name, name) == 0) 166 if (strcmp(cmu[i]->pd_name, name) == 0)
167 exynos5_clk_register_subcmu(&pdev->dev, 167 exynos5_clk_register_subcmu(&pdev->dev,
168 &cmu[i], np); 168 cmu[i], np);
169 } 169 }
170 return 0; 170 return 0;
171} 171}
diff --git a/drivers/clk/samsung/clk-exynos5-subcmu.h b/drivers/clk/samsung/clk-exynos5-subcmu.h
index 755ee8aaa3de..9ae5356f25aa 100644
--- a/drivers/clk/samsung/clk-exynos5-subcmu.h
+++ b/drivers/clk/samsung/clk-exynos5-subcmu.h
@@ -21,6 +21,6 @@ struct exynos5_subcmu_info {
21}; 21};
22 22
23void exynos5_subcmus_init(struct samsung_clk_provider *ctx, int nr_cmus, 23void exynos5_subcmus_init(struct samsung_clk_provider *ctx, int nr_cmus,
24 const struct exynos5_subcmu_info *cmu); 24 const struct exynos5_subcmu_info **cmu);
25 25
26#endif 26#endif
diff --git a/drivers/clk/samsung/clk-exynos5250.c b/drivers/clk/samsung/clk-exynos5250.c
index f2b896881768..931c70a4da19 100644
--- a/drivers/clk/samsung/clk-exynos5250.c
+++ b/drivers/clk/samsung/clk-exynos5250.c
@@ -681,6 +681,10 @@ static const struct exynos5_subcmu_info exynos5250_disp_subcmu = {
681 .pd_name = "DISP1", 681 .pd_name = "DISP1",
682}; 682};
683 683
684static const struct exynos5_subcmu_info *exynos5250_subcmus[] = {
685 &exynos5250_disp_subcmu,
686};
687
684static const struct samsung_pll_rate_table vpll_24mhz_tbl[] __initconst = { 688static const struct samsung_pll_rate_table vpll_24mhz_tbl[] __initconst = {
685 /* sorted in descending order */ 689 /* sorted in descending order */
686 /* PLL_36XX_RATE(rate, m, p, s, k) */ 690 /* PLL_36XX_RATE(rate, m, p, s, k) */
@@ -843,7 +847,8 @@ static void __init exynos5250_clk_init(struct device_node *np)
843 847
844 samsung_clk_sleep_init(reg_base, exynos5250_clk_regs, 848 samsung_clk_sleep_init(reg_base, exynos5250_clk_regs,
845 ARRAY_SIZE(exynos5250_clk_regs)); 849 ARRAY_SIZE(exynos5250_clk_regs));
846 exynos5_subcmus_init(ctx, 1, &exynos5250_disp_subcmu); 850 exynos5_subcmus_init(ctx, ARRAY_SIZE(exynos5250_subcmus),
851 exynos5250_subcmus);
847 852
848 samsung_clk_of_add_provider(np, ctx); 853 samsung_clk_of_add_provider(np, ctx);
849 854
diff --git a/drivers/clk/samsung/clk-exynos5420.c b/drivers/clk/samsung/clk-exynos5420.c
index 01bca5a498b2..7670cc596c74 100644
--- a/drivers/clk/samsung/clk-exynos5420.c
+++ b/drivers/clk/samsung/clk-exynos5420.c
@@ -534,8 +534,6 @@ static const struct samsung_gate_clock exynos5800_gate_clks[] __initconst = {
534 GATE_BUS_TOP, 24, 0, 0), 534 GATE_BUS_TOP, 24, 0, 0),
535 GATE(CLK_ACLK432_SCALER, "aclk432_scaler", "mout_user_aclk432_scaler", 535 GATE(CLK_ACLK432_SCALER, "aclk432_scaler", "mout_user_aclk432_scaler",
536 GATE_BUS_TOP, 27, CLK_IS_CRITICAL, 0), 536 GATE_BUS_TOP, 27, CLK_IS_CRITICAL, 0),
537 GATE(CLK_MAU_EPLL, "mau_epll", "mout_user_mau_epll",
538 SRC_MASK_TOP7, 20, CLK_SET_RATE_PARENT, 0),
539}; 537};
540 538
541static const struct samsung_mux_clock exynos5420_mux_clks[] __initconst = { 539static const struct samsung_mux_clock exynos5420_mux_clks[] __initconst = {
@@ -577,8 +575,13 @@ static const struct samsung_div_clock exynos5420_div_clks[] __initconst = {
577 575
578static const struct samsung_gate_clock exynos5420_gate_clks[] __initconst = { 576static const struct samsung_gate_clock exynos5420_gate_clks[] __initconst = {
579 GATE(CLK_SECKEY, "seckey", "aclk66_psgen", GATE_BUS_PERIS1, 1, 0, 0), 577 GATE(CLK_SECKEY, "seckey", "aclk66_psgen", GATE_BUS_PERIS1, 1, 0, 0),
578 /* Maudio Block */
580 GATE(CLK_MAU_EPLL, "mau_epll", "mout_mau_epll_clk", 579 GATE(CLK_MAU_EPLL, "mau_epll", "mout_mau_epll_clk",
581 SRC_MASK_TOP7, 20, CLK_SET_RATE_PARENT, 0), 580 SRC_MASK_TOP7, 20, CLK_SET_RATE_PARENT, 0),
581 GATE(CLK_SCLK_MAUDIO0, "sclk_maudio0", "dout_maudio0",
582 GATE_TOP_SCLK_MAU, 0, CLK_SET_RATE_PARENT, 0),
583 GATE(CLK_SCLK_MAUPCM0, "sclk_maupcm0", "dout_maupcm0",
584 GATE_TOP_SCLK_MAU, 1, CLK_SET_RATE_PARENT, 0),
582}; 585};
583 586
584static const struct samsung_mux_clock exynos5x_mux_clks[] __initconst = { 587static const struct samsung_mux_clock exynos5x_mux_clks[] __initconst = {
@@ -890,9 +893,6 @@ static const struct samsung_div_clock exynos5x_div_clks[] __initconst = {
890 /* GSCL Block */ 893 /* GSCL Block */
891 DIV(0, "dout_gscl_blk_333", "aclk333_432_gscl", DIV2_RATIO0, 6, 2), 894 DIV(0, "dout_gscl_blk_333", "aclk333_432_gscl", DIV2_RATIO0, 6, 2),
892 895
893 /* MSCL Block */
894 DIV(0, "dout_mscl_blk", "aclk400_mscl", DIV2_RATIO0, 28, 2),
895
896 /* PSGEN */ 896 /* PSGEN */
897 DIV(0, "dout_gen_blk", "mout_user_aclk266", DIV2_RATIO0, 8, 1), 897 DIV(0, "dout_gen_blk", "mout_user_aclk266", DIV2_RATIO0, 8, 1),
898 DIV(0, "dout_jpg_blk", "aclk166", DIV2_RATIO0, 20, 1), 898 DIV(0, "dout_jpg_blk", "aclk166", DIV2_RATIO0, 20, 1),
@@ -1017,12 +1017,6 @@ static const struct samsung_gate_clock exynos5x_gate_clks[] __initconst = {
1017 GATE(CLK_SCLK_DP1, "sclk_dp1", "dout_dp1", 1017 GATE(CLK_SCLK_DP1, "sclk_dp1", "dout_dp1",
1018 GATE_TOP_SCLK_DISP1, 20, CLK_SET_RATE_PARENT, 0), 1018 GATE_TOP_SCLK_DISP1, 20, CLK_SET_RATE_PARENT, 0),
1019 1019
1020 /* Maudio Block */
1021 GATE(CLK_SCLK_MAUDIO0, "sclk_maudio0", "dout_maudio0",
1022 GATE_TOP_SCLK_MAU, 0, CLK_SET_RATE_PARENT, 0),
1023 GATE(CLK_SCLK_MAUPCM0, "sclk_maupcm0", "dout_maupcm0",
1024 GATE_TOP_SCLK_MAU, 1, CLK_SET_RATE_PARENT, 0),
1025
1026 /* FSYS Block */ 1020 /* FSYS Block */
1027 GATE(CLK_TSI, "tsi", "aclk200_fsys", GATE_BUS_FSYS0, 0, 0, 0), 1021 GATE(CLK_TSI, "tsi", "aclk200_fsys", GATE_BUS_FSYS0, 0, 0, 0),
1028 GATE(CLK_PDMA0, "pdma0", "aclk200_fsys", GATE_BUS_FSYS0, 1, 0, 0), 1022 GATE(CLK_PDMA0, "pdma0", "aclk200_fsys", GATE_BUS_FSYS0, 1, 0, 0),
@@ -1162,17 +1156,6 @@ static const struct samsung_gate_clock exynos5x_gate_clks[] __initconst = {
1162 GATE(CLK_FIMC_LITE3, "fimc_lite3", "aclk333_432_gscl", 1156 GATE(CLK_FIMC_LITE3, "fimc_lite3", "aclk333_432_gscl",
1163 GATE_IP_GSCL1, 17, 0, 0), 1157 GATE_IP_GSCL1, 17, 0, 0),
1164 1158
1165 /* MSCL Block */
1166 GATE(CLK_MSCL0, "mscl0", "aclk400_mscl", GATE_IP_MSCL, 0, 0, 0),
1167 GATE(CLK_MSCL1, "mscl1", "aclk400_mscl", GATE_IP_MSCL, 1, 0, 0),
1168 GATE(CLK_MSCL2, "mscl2", "aclk400_mscl", GATE_IP_MSCL, 2, 0, 0),
1169 GATE(CLK_SMMU_MSCL0, "smmu_mscl0", "dout_mscl_blk",
1170 GATE_IP_MSCL, 8, 0, 0),
1171 GATE(CLK_SMMU_MSCL1, "smmu_mscl1", "dout_mscl_blk",
1172 GATE_IP_MSCL, 9, 0, 0),
1173 GATE(CLK_SMMU_MSCL2, "smmu_mscl2", "dout_mscl_blk",
1174 GATE_IP_MSCL, 10, 0, 0),
1175
1176 /* ISP */ 1159 /* ISP */
1177 GATE(CLK_SCLK_UART_ISP, "sclk_uart_isp", "dout_uart_isp", 1160 GATE(CLK_SCLK_UART_ISP, "sclk_uart_isp", "dout_uart_isp",
1178 GATE_TOP_SCLK_ISP, 0, CLK_SET_RATE_PARENT, 0), 1161 GATE_TOP_SCLK_ISP, 0, CLK_SET_RATE_PARENT, 0),
@@ -1281,32 +1264,103 @@ static struct exynos5_subcmu_reg_dump exynos5x_mfc_suspend_regs[] = {
1281 { DIV4_RATIO, 0, 0x3 }, /* DIV dout_mfc_blk */ 1264 { DIV4_RATIO, 0, 0x3 }, /* DIV dout_mfc_blk */
1282}; 1265};
1283 1266
1284static const struct exynos5_subcmu_info exynos5x_subcmus[] = { 1267static const struct samsung_gate_clock exynos5x_mscl_gate_clks[] __initconst = {
1285 { 1268 /* MSCL Block */
1286 .div_clks = exynos5x_disp_div_clks, 1269 GATE(CLK_MSCL0, "mscl0", "aclk400_mscl", GATE_IP_MSCL, 0, 0, 0),
1287 .nr_div_clks = ARRAY_SIZE(exynos5x_disp_div_clks), 1270 GATE(CLK_MSCL1, "mscl1", "aclk400_mscl", GATE_IP_MSCL, 1, 0, 0),
1288 .gate_clks = exynos5x_disp_gate_clks, 1271 GATE(CLK_MSCL2, "mscl2", "aclk400_mscl", GATE_IP_MSCL, 2, 0, 0),
1289 .nr_gate_clks = ARRAY_SIZE(exynos5x_disp_gate_clks), 1272 GATE(CLK_SMMU_MSCL0, "smmu_mscl0", "dout_mscl_blk",
1290 .suspend_regs = exynos5x_disp_suspend_regs, 1273 GATE_IP_MSCL, 8, 0, 0),
1291 .nr_suspend_regs = ARRAY_SIZE(exynos5x_disp_suspend_regs), 1274 GATE(CLK_SMMU_MSCL1, "smmu_mscl1", "dout_mscl_blk",
1292 .pd_name = "DISP", 1275 GATE_IP_MSCL, 9, 0, 0),
1293 }, { 1276 GATE(CLK_SMMU_MSCL2, "smmu_mscl2", "dout_mscl_blk",
1294 .div_clks = exynos5x_gsc_div_clks, 1277 GATE_IP_MSCL, 10, 0, 0),
1295 .nr_div_clks = ARRAY_SIZE(exynos5x_gsc_div_clks), 1278};
1296 .gate_clks = exynos5x_gsc_gate_clks, 1279
1297 .nr_gate_clks = ARRAY_SIZE(exynos5x_gsc_gate_clks), 1280static const struct samsung_div_clock exynos5x_mscl_div_clks[] __initconst = {
1298 .suspend_regs = exynos5x_gsc_suspend_regs, 1281 DIV(0, "dout_mscl_blk", "aclk400_mscl", DIV2_RATIO0, 28, 2),
1299 .nr_suspend_regs = ARRAY_SIZE(exynos5x_gsc_suspend_regs), 1282};
1300 .pd_name = "GSC", 1283
1301 }, { 1284static struct exynos5_subcmu_reg_dump exynos5x_mscl_suspend_regs[] = {
1302 .div_clks = exynos5x_mfc_div_clks, 1285 { GATE_IP_MSCL, 0xffffffff, 0xffffffff }, /* MSCL gates */
1303 .nr_div_clks = ARRAY_SIZE(exynos5x_mfc_div_clks), 1286 { SRC_TOP3, 0, BIT(4) }, /* MUX mout_user_aclk400_mscl */
1304 .gate_clks = exynos5x_mfc_gate_clks, 1287 { DIV2_RATIO0, 0, 0x30000000 }, /* DIV dout_mscl_blk */
1305 .nr_gate_clks = ARRAY_SIZE(exynos5x_mfc_gate_clks), 1288};
1306 .suspend_regs = exynos5x_mfc_suspend_regs, 1289
1307 .nr_suspend_regs = ARRAY_SIZE(exynos5x_mfc_suspend_regs), 1290static const struct samsung_gate_clock exynos5800_mau_gate_clks[] __initconst = {
1308 .pd_name = "MFC", 1291 GATE(CLK_MAU_EPLL, "mau_epll", "mout_user_mau_epll",
1309 }, 1292 SRC_MASK_TOP7, 20, CLK_SET_RATE_PARENT, 0),
1293 GATE(CLK_SCLK_MAUDIO0, "sclk_maudio0", "dout_maudio0",
1294 GATE_TOP_SCLK_MAU, 0, CLK_SET_RATE_PARENT, 0),
1295 GATE(CLK_SCLK_MAUPCM0, "sclk_maupcm0", "dout_maupcm0",
1296 GATE_TOP_SCLK_MAU, 1, CLK_SET_RATE_PARENT, 0),
1297};
1298
1299static struct exynos5_subcmu_reg_dump exynos5800_mau_suspend_regs[] = {
1300 { SRC_TOP9, 0, BIT(8) }, /* MUX mout_user_mau_epll */
1301};
1302
1303static const struct exynos5_subcmu_info exynos5x_disp_subcmu = {
1304 .div_clks = exynos5x_disp_div_clks,
1305 .nr_div_clks = ARRAY_SIZE(exynos5x_disp_div_clks),
1306 .gate_clks = exynos5x_disp_gate_clks,
1307 .nr_gate_clks = ARRAY_SIZE(exynos5x_disp_gate_clks),
1308 .suspend_regs = exynos5x_disp_suspend_regs,
1309 .nr_suspend_regs = ARRAY_SIZE(exynos5x_disp_suspend_regs),
1310 .pd_name = "DISP",
1311};
1312
1313static const struct exynos5_subcmu_info exynos5x_gsc_subcmu = {
1314 .div_clks = exynos5x_gsc_div_clks,
1315 .nr_div_clks = ARRAY_SIZE(exynos5x_gsc_div_clks),
1316 .gate_clks = exynos5x_gsc_gate_clks,
1317 .nr_gate_clks = ARRAY_SIZE(exynos5x_gsc_gate_clks),
1318 .suspend_regs = exynos5x_gsc_suspend_regs,
1319 .nr_suspend_regs = ARRAY_SIZE(exynos5x_gsc_suspend_regs),
1320 .pd_name = "GSC",
1321};
1322
1323static const struct exynos5_subcmu_info exynos5x_mfc_subcmu = {
1324 .div_clks = exynos5x_mfc_div_clks,
1325 .nr_div_clks = ARRAY_SIZE(exynos5x_mfc_div_clks),
1326 .gate_clks = exynos5x_mfc_gate_clks,
1327 .nr_gate_clks = ARRAY_SIZE(exynos5x_mfc_gate_clks),
1328 .suspend_regs = exynos5x_mfc_suspend_regs,
1329 .nr_suspend_regs = ARRAY_SIZE(exynos5x_mfc_suspend_regs),
1330 .pd_name = "MFC",
1331};
1332
1333static const struct exynos5_subcmu_info exynos5x_mscl_subcmu = {
1334 .div_clks = exynos5x_mscl_div_clks,
1335 .nr_div_clks = ARRAY_SIZE(exynos5x_mscl_div_clks),
1336 .gate_clks = exynos5x_mscl_gate_clks,
1337 .nr_gate_clks = ARRAY_SIZE(exynos5x_mscl_gate_clks),
1338 .suspend_regs = exynos5x_mscl_suspend_regs,
1339 .nr_suspend_regs = ARRAY_SIZE(exynos5x_mscl_suspend_regs),
1340 .pd_name = "MSC",
1341};
1342
1343static const struct exynos5_subcmu_info exynos5800_mau_subcmu = {
1344 .gate_clks = exynos5800_mau_gate_clks,
1345 .nr_gate_clks = ARRAY_SIZE(exynos5800_mau_gate_clks),
1346 .suspend_regs = exynos5800_mau_suspend_regs,
1347 .nr_suspend_regs = ARRAY_SIZE(exynos5800_mau_suspend_regs),
1348 .pd_name = "MAU",
1349};
1350
1351static const struct exynos5_subcmu_info *exynos5x_subcmus[] = {
1352 &exynos5x_disp_subcmu,
1353 &exynos5x_gsc_subcmu,
1354 &exynos5x_mfc_subcmu,
1355 &exynos5x_mscl_subcmu,
1356};
1357
1358static const struct exynos5_subcmu_info *exynos5800_subcmus[] = {
1359 &exynos5x_disp_subcmu,
1360 &exynos5x_gsc_subcmu,
1361 &exynos5x_mfc_subcmu,
1362 &exynos5x_mscl_subcmu,
1363 &exynos5800_mau_subcmu,
1310}; 1364};
1311 1365
1312static const struct samsung_pll_rate_table exynos5420_pll2550x_24mhz_tbl[] __initconst = { 1366static const struct samsung_pll_rate_table exynos5420_pll2550x_24mhz_tbl[] __initconst = {
@@ -1539,11 +1593,17 @@ static void __init exynos5x_clk_init(struct device_node *np,
1539 samsung_clk_extended_sleep_init(reg_base, 1593 samsung_clk_extended_sleep_init(reg_base,
1540 exynos5x_clk_regs, ARRAY_SIZE(exynos5x_clk_regs), 1594 exynos5x_clk_regs, ARRAY_SIZE(exynos5x_clk_regs),
1541 exynos5420_set_clksrc, ARRAY_SIZE(exynos5420_set_clksrc)); 1595 exynos5420_set_clksrc, ARRAY_SIZE(exynos5420_set_clksrc));
1542 if (soc == EXYNOS5800) 1596
1597 if (soc == EXYNOS5800) {
1543 samsung_clk_sleep_init(reg_base, exynos5800_clk_regs, 1598 samsung_clk_sleep_init(reg_base, exynos5800_clk_regs,
1544 ARRAY_SIZE(exynos5800_clk_regs)); 1599 ARRAY_SIZE(exynos5800_clk_regs));
1545 exynos5_subcmus_init(ctx, ARRAY_SIZE(exynos5x_subcmus), 1600
1546 exynos5x_subcmus); 1601 exynos5_subcmus_init(ctx, ARRAY_SIZE(exynos5800_subcmus),
1602 exynos5800_subcmus);
1603 } else {
1604 exynos5_subcmus_init(ctx, ARRAY_SIZE(exynos5x_subcmus),
1605 exynos5x_subcmus);
1606 }
1547 1607
1548 samsung_clk_of_add_provider(np, ctx); 1608 samsung_clk_of_add_provider(np, ctx);
1549} 1609}
diff --git a/drivers/clk/socfpga/clk-periph-s10.c b/drivers/clk/socfpga/clk-periph-s10.c
index 5c50e723ecae..1a191eeeebba 100644
--- a/drivers/clk/socfpga/clk-periph-s10.c
+++ b/drivers/clk/socfpga/clk-periph-s10.c
@@ -38,7 +38,7 @@ static unsigned long clk_peri_cnt_clk_recalc_rate(struct clk_hw *hwclk,
38 if (socfpgaclk->fixed_div) { 38 if (socfpgaclk->fixed_div) {
39 div = socfpgaclk->fixed_div; 39 div = socfpgaclk->fixed_div;
40 } else { 40 } else {
41 if (!socfpgaclk->bypass_reg) 41 if (socfpgaclk->hw.reg)
42 div = ((readl(socfpgaclk->hw.reg) & 0x7ff) + 1); 42 div = ((readl(socfpgaclk->hw.reg) & 0x7ff) + 1);
43 } 43 }
44 44
diff --git a/drivers/clk/sprd/Kconfig b/drivers/clk/sprd/Kconfig
index 91d3d721c801..3c219af25100 100644
--- a/drivers/clk/sprd/Kconfig
+++ b/drivers/clk/sprd/Kconfig
@@ -3,6 +3,7 @@ config SPRD_COMMON_CLK
3 tristate "Clock support for Spreadtrum SoCs" 3 tristate "Clock support for Spreadtrum SoCs"
4 depends on ARCH_SPRD || COMPILE_TEST 4 depends on ARCH_SPRD || COMPILE_TEST
5 default ARCH_SPRD 5 default ARCH_SPRD
6 select REGMAP_MMIO
6 7
7if SPRD_COMMON_CLK 8if SPRD_COMMON_CLK
8 9
diff --git a/drivers/clocksource/timer-riscv.c b/drivers/clocksource/timer-riscv.c
index 5e6038fbf115..09e031176bc6 100644
--- a/drivers/clocksource/timer-riscv.c
+++ b/drivers/clocksource/timer-riscv.c
@@ -55,7 +55,7 @@ static u64 riscv_sched_clock(void)
55 return get_cycles64(); 55 return get_cycles64();
56} 56}
57 57
58static DEFINE_PER_CPU(struct clocksource, riscv_clocksource) = { 58static struct clocksource riscv_clocksource = {
59 .name = "riscv_clocksource", 59 .name = "riscv_clocksource",
60 .rating = 300, 60 .rating = 300,
61 .mask = CLOCKSOURCE_MASK(64), 61 .mask = CLOCKSOURCE_MASK(64),
@@ -92,7 +92,6 @@ void riscv_timer_interrupt(void)
92static int __init riscv_timer_init_dt(struct device_node *n) 92static int __init riscv_timer_init_dt(struct device_node *n)
93{ 93{
94 int cpuid, hartid, error; 94 int cpuid, hartid, error;
95 struct clocksource *cs;
96 95
97 hartid = riscv_of_processor_hartid(n); 96 hartid = riscv_of_processor_hartid(n);
98 if (hartid < 0) { 97 if (hartid < 0) {
@@ -112,8 +111,7 @@ static int __init riscv_timer_init_dt(struct device_node *n)
112 111
113 pr_info("%s: Registering clocksource cpuid [%d] hartid [%d]\n", 112 pr_info("%s: Registering clocksource cpuid [%d] hartid [%d]\n",
114 __func__, cpuid, hartid); 113 __func__, cpuid, hartid);
115 cs = per_cpu_ptr(&riscv_clocksource, cpuid); 114 error = clocksource_register_hz(&riscv_clocksource, riscv_timebase);
116 error = clocksource_register_hz(cs, riscv_timebase);
117 if (error) { 115 if (error) {
118 pr_err("RISCV timer register failed [%d] for cpu = [%d]\n", 116 pr_err("RISCV timer register failed [%d] for cpu = [%d]\n",
119 error, cpuid); 117 error, cpuid);
diff --git a/drivers/connector/connector.c b/drivers/connector/connector.c
index 23553ed6b548..2d22d6bf52f2 100644
--- a/drivers/connector/connector.c
+++ b/drivers/connector/connector.c
@@ -248,16 +248,12 @@ static int __maybe_unused cn_proc_show(struct seq_file *m, void *v)
248 return 0; 248 return 0;
249} 249}
250 250
251static struct cn_dev cdev = {
252 .input = cn_rx_skb,
253};
254
255static int cn_init(void) 251static int cn_init(void)
256{ 252{
257 struct cn_dev *dev = &cdev; 253 struct cn_dev *dev = &cdev;
258 struct netlink_kernel_cfg cfg = { 254 struct netlink_kernel_cfg cfg = {
259 .groups = CN_NETLINK_USERS + 0xf, 255 .groups = CN_NETLINK_USERS + 0xf,
260 .input = dev->input, 256 .input = cn_rx_skb,
261 }; 257 };
262 258
263 dev->nls = netlink_kernel_create(&init_net, NETLINK_CONNECTOR, &cfg); 259 dev->nls = netlink_kernel_create(&init_net, NETLINK_CONNECTOR, &cfg);
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index 8dda62367816..c28ebf2810f1 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -2528,7 +2528,7 @@ static int cpufreq_boost_set_sw(int state)
2528 } 2528 }
2529 2529
2530 ret = dev_pm_qos_update_request(policy->max_freq_req, policy->max); 2530 ret = dev_pm_qos_update_request(policy->max_freq_req, policy->max);
2531 if (ret) 2531 if (ret < 0)
2532 break; 2532 break;
2533 } 2533 }
2534 2534
diff --git a/drivers/cpufreq/pasemi-cpufreq.c b/drivers/cpufreq/pasemi-cpufreq.c
index 93f39a1d4c3d..c66f566a854c 100644
--- a/drivers/cpufreq/pasemi-cpufreq.c
+++ b/drivers/cpufreq/pasemi-cpufreq.c
@@ -131,10 +131,18 @@ static int pas_cpufreq_cpu_init(struct cpufreq_policy *policy)
131 int err = -ENODEV; 131 int err = -ENODEV;
132 132
133 cpu = of_get_cpu_node(policy->cpu, NULL); 133 cpu = of_get_cpu_node(policy->cpu, NULL);
134 if (!cpu)
135 goto out;
134 136
137 max_freqp = of_get_property(cpu, "clock-frequency", NULL);
135 of_node_put(cpu); 138 of_node_put(cpu);
136 if (!cpu) 139 if (!max_freqp) {
140 err = -EINVAL;
137 goto out; 141 goto out;
142 }
143
144 /* we need the freq in kHz */
145 max_freq = *max_freqp / 1000;
138 146
139 dn = of_find_compatible_node(NULL, NULL, "1682m-sdc"); 147 dn = of_find_compatible_node(NULL, NULL, "1682m-sdc");
140 if (!dn) 148 if (!dn)
@@ -171,16 +179,6 @@ static int pas_cpufreq_cpu_init(struct cpufreq_policy *policy)
171 } 179 }
172 180
173 pr_debug("init cpufreq on CPU %d\n", policy->cpu); 181 pr_debug("init cpufreq on CPU %d\n", policy->cpu);
174
175 max_freqp = of_get_property(cpu, "clock-frequency", NULL);
176 if (!max_freqp) {
177 err = -EINVAL;
178 goto out_unmap_sdcpwr;
179 }
180
181 /* we need the freq in kHz */
182 max_freq = *max_freqp / 1000;
183
184 pr_debug("max clock-frequency is at %u kHz\n", max_freq); 182 pr_debug("max clock-frequency is at %u kHz\n", max_freq);
185 pr_debug("initializing frequency table\n"); 183 pr_debug("initializing frequency table\n");
186 184
@@ -199,9 +197,6 @@ static int pas_cpufreq_cpu_init(struct cpufreq_policy *policy)
199 cpufreq_generic_init(policy, pas_freqs, get_gizmo_latency()); 197 cpufreq_generic_init(policy, pas_freqs, get_gizmo_latency());
200 return 0; 198 return 0;
201 199
202out_unmap_sdcpwr:
203 iounmap(sdcpwr_mapbase);
204
205out_unmap_sdcasr: 200out_unmap_sdcasr:
206 iounmap(sdcasr_mapbase); 201 iounmap(sdcasr_mapbase);
207out: 202out:
diff --git a/drivers/crypto/ccp/ccp-crypto-aes-galois.c b/drivers/crypto/ccp/ccp-crypto-aes-galois.c
index f9fec2ddf56a..94c1ad7eeddf 100644
--- a/drivers/crypto/ccp/ccp-crypto-aes-galois.c
+++ b/drivers/crypto/ccp/ccp-crypto-aes-galois.c
@@ -58,6 +58,19 @@ static int ccp_aes_gcm_setkey(struct crypto_aead *tfm, const u8 *key,
58static int ccp_aes_gcm_setauthsize(struct crypto_aead *tfm, 58static int ccp_aes_gcm_setauthsize(struct crypto_aead *tfm,
59 unsigned int authsize) 59 unsigned int authsize)
60{ 60{
61 switch (authsize) {
62 case 16:
63 case 15:
64 case 14:
65 case 13:
66 case 12:
67 case 8:
68 case 4:
69 break;
70 default:
71 return -EINVAL;
72 }
73
61 return 0; 74 return 0;
62} 75}
63 76
@@ -104,6 +117,7 @@ static int ccp_aes_gcm_crypt(struct aead_request *req, bool encrypt)
104 memset(&rctx->cmd, 0, sizeof(rctx->cmd)); 117 memset(&rctx->cmd, 0, sizeof(rctx->cmd));
105 INIT_LIST_HEAD(&rctx->cmd.entry); 118 INIT_LIST_HEAD(&rctx->cmd.entry);
106 rctx->cmd.engine = CCP_ENGINE_AES; 119 rctx->cmd.engine = CCP_ENGINE_AES;
120 rctx->cmd.u.aes.authsize = crypto_aead_authsize(tfm);
107 rctx->cmd.u.aes.type = ctx->u.aes.type; 121 rctx->cmd.u.aes.type = ctx->u.aes.type;
108 rctx->cmd.u.aes.mode = ctx->u.aes.mode; 122 rctx->cmd.u.aes.mode = ctx->u.aes.mode;
109 rctx->cmd.u.aes.action = encrypt; 123 rctx->cmd.u.aes.action = encrypt;
diff --git a/drivers/crypto/ccp/ccp-dev.c b/drivers/crypto/ccp/ccp-dev.c
index f79eede71c62..edefa669153f 100644
--- a/drivers/crypto/ccp/ccp-dev.c
+++ b/drivers/crypto/ccp/ccp-dev.c
@@ -540,6 +540,10 @@ int ccp_dev_suspend(struct sp_device *sp, pm_message_t state)
540 unsigned long flags; 540 unsigned long flags;
541 unsigned int i; 541 unsigned int i;
542 542
543 /* If there's no device there's nothing to do */
544 if (!ccp)
545 return 0;
546
543 spin_lock_irqsave(&ccp->cmd_lock, flags); 547 spin_lock_irqsave(&ccp->cmd_lock, flags);
544 548
545 ccp->suspending = 1; 549 ccp->suspending = 1;
@@ -564,6 +568,10 @@ int ccp_dev_resume(struct sp_device *sp)
564 unsigned long flags; 568 unsigned long flags;
565 unsigned int i; 569 unsigned int i;
566 570
571 /* If there's no device there's nothing to do */
572 if (!ccp)
573 return 0;
574
567 spin_lock_irqsave(&ccp->cmd_lock, flags); 575 spin_lock_irqsave(&ccp->cmd_lock, flags);
568 576
569 ccp->suspending = 0; 577 ccp->suspending = 0;
diff --git a/drivers/crypto/ccp/ccp-ops.c b/drivers/crypto/ccp/ccp-ops.c
index c69ed4bae2eb..9bc3c62157d7 100644
--- a/drivers/crypto/ccp/ccp-ops.c
+++ b/drivers/crypto/ccp/ccp-ops.c
@@ -622,6 +622,7 @@ static int ccp_run_aes_gcm_cmd(struct ccp_cmd_queue *cmd_q,
622 622
623 unsigned long long *final; 623 unsigned long long *final;
624 unsigned int dm_offset; 624 unsigned int dm_offset;
625 unsigned int authsize;
625 unsigned int jobid; 626 unsigned int jobid;
626 unsigned int ilen; 627 unsigned int ilen;
627 bool in_place = true; /* Default value */ 628 bool in_place = true; /* Default value */
@@ -643,6 +644,21 @@ static int ccp_run_aes_gcm_cmd(struct ccp_cmd_queue *cmd_q,
643 if (!aes->key) /* Gotta have a key SGL */ 644 if (!aes->key) /* Gotta have a key SGL */
644 return -EINVAL; 645 return -EINVAL;
645 646
647 /* Zero defaults to 16 bytes, the maximum size */
648 authsize = aes->authsize ? aes->authsize : AES_BLOCK_SIZE;
649 switch (authsize) {
650 case 16:
651 case 15:
652 case 14:
653 case 13:
654 case 12:
655 case 8:
656 case 4:
657 break;
658 default:
659 return -EINVAL;
660 }
661
646 /* First, decompose the source buffer into AAD & PT, 662 /* First, decompose the source buffer into AAD & PT,
647 * and the destination buffer into AAD, CT & tag, or 663 * and the destination buffer into AAD, CT & tag, or
648 * the input into CT & tag. 664 * the input into CT & tag.
@@ -657,7 +673,7 @@ static int ccp_run_aes_gcm_cmd(struct ccp_cmd_queue *cmd_q,
657 p_tag = scatterwalk_ffwd(sg_tag, p_outp, ilen); 673 p_tag = scatterwalk_ffwd(sg_tag, p_outp, ilen);
658 } else { 674 } else {
659 /* Input length for decryption includes tag */ 675 /* Input length for decryption includes tag */
660 ilen = aes->src_len - AES_BLOCK_SIZE; 676 ilen = aes->src_len - authsize;
661 p_tag = scatterwalk_ffwd(sg_tag, p_inp, ilen); 677 p_tag = scatterwalk_ffwd(sg_tag, p_inp, ilen);
662 } 678 }
663 679
@@ -766,8 +782,7 @@ static int ccp_run_aes_gcm_cmd(struct ccp_cmd_queue *cmd_q,
766 while (src.sg_wa.bytes_left) { 782 while (src.sg_wa.bytes_left) {
767 ccp_prepare_data(&src, &dst, &op, AES_BLOCK_SIZE, true); 783 ccp_prepare_data(&src, &dst, &op, AES_BLOCK_SIZE, true);
768 if (!src.sg_wa.bytes_left) { 784 if (!src.sg_wa.bytes_left) {
769 unsigned int nbytes = aes->src_len 785 unsigned int nbytes = ilen % AES_BLOCK_SIZE;
770 % AES_BLOCK_SIZE;
771 786
772 if (nbytes) { 787 if (nbytes) {
773 op.eom = 1; 788 op.eom = 1;
@@ -839,19 +854,19 @@ static int ccp_run_aes_gcm_cmd(struct ccp_cmd_queue *cmd_q,
839 854
840 if (aes->action == CCP_AES_ACTION_ENCRYPT) { 855 if (aes->action == CCP_AES_ACTION_ENCRYPT) {
841 /* Put the ciphered tag after the ciphertext. */ 856 /* Put the ciphered tag after the ciphertext. */
842 ccp_get_dm_area(&final_wa, 0, p_tag, 0, AES_BLOCK_SIZE); 857 ccp_get_dm_area(&final_wa, 0, p_tag, 0, authsize);
843 } else { 858 } else {
844 /* Does this ciphered tag match the input? */ 859 /* Does this ciphered tag match the input? */
845 ret = ccp_init_dm_workarea(&tag, cmd_q, AES_BLOCK_SIZE, 860 ret = ccp_init_dm_workarea(&tag, cmd_q, authsize,
846 DMA_BIDIRECTIONAL); 861 DMA_BIDIRECTIONAL);
847 if (ret) 862 if (ret)
848 goto e_tag; 863 goto e_tag;
849 ret = ccp_set_dm_area(&tag, 0, p_tag, 0, AES_BLOCK_SIZE); 864 ret = ccp_set_dm_area(&tag, 0, p_tag, 0, authsize);
850 if (ret) 865 if (ret)
851 goto e_tag; 866 goto e_tag;
852 867
853 ret = crypto_memneq(tag.address, final_wa.address, 868 ret = crypto_memneq(tag.address, final_wa.address,
854 AES_BLOCK_SIZE) ? -EBADMSG : 0; 869 authsize) ? -EBADMSG : 0;
855 ccp_dm_free(&tag); 870 ccp_dm_free(&tag);
856 } 871 }
857 872
@@ -859,11 +874,11 @@ e_tag:
859 ccp_dm_free(&final_wa); 874 ccp_dm_free(&final_wa);
860 875
861e_dst: 876e_dst:
862 if (aes->src_len && !in_place) 877 if (ilen > 0 && !in_place)
863 ccp_free_data(&dst, cmd_q); 878 ccp_free_data(&dst, cmd_q);
864 879
865e_src: 880e_src:
866 if (aes->src_len) 881 if (ilen > 0)
867 ccp_free_data(&src, cmd_q); 882 ccp_free_data(&src, cmd_q);
868 883
869e_aad: 884e_aad:
diff --git a/drivers/crypto/ux500/cryp/cryp.c b/drivers/crypto/ux500/cryp/cryp.c
index ece83a363e11..f22f6fa612b3 100644
--- a/drivers/crypto/ux500/cryp/cryp.c
+++ b/drivers/crypto/ux500/cryp/cryp.c
@@ -314,14 +314,17 @@ void cryp_save_device_context(struct cryp_device_data *device_data,
314 case CRYP_KEY_SIZE_256: 314 case CRYP_KEY_SIZE_256:
315 ctx->key_4_l = readl_relaxed(&src_reg->key_4_l); 315 ctx->key_4_l = readl_relaxed(&src_reg->key_4_l);
316 ctx->key_4_r = readl_relaxed(&src_reg->key_4_r); 316 ctx->key_4_r = readl_relaxed(&src_reg->key_4_r);
317 /* Fall through */
317 318
318 case CRYP_KEY_SIZE_192: 319 case CRYP_KEY_SIZE_192:
319 ctx->key_3_l = readl_relaxed(&src_reg->key_3_l); 320 ctx->key_3_l = readl_relaxed(&src_reg->key_3_l);
320 ctx->key_3_r = readl_relaxed(&src_reg->key_3_r); 321 ctx->key_3_r = readl_relaxed(&src_reg->key_3_r);
322 /* Fall through */
321 323
322 case CRYP_KEY_SIZE_128: 324 case CRYP_KEY_SIZE_128:
323 ctx->key_2_l = readl_relaxed(&src_reg->key_2_l); 325 ctx->key_2_l = readl_relaxed(&src_reg->key_2_l);
324 ctx->key_2_r = readl_relaxed(&src_reg->key_2_r); 326 ctx->key_2_r = readl_relaxed(&src_reg->key_2_r);
327 /* Fall through */
325 328
326 default: 329 default:
327 ctx->key_1_l = readl_relaxed(&src_reg->key_1_l); 330 ctx->key_1_l = readl_relaxed(&src_reg->key_1_l);
@@ -361,14 +364,17 @@ void cryp_restore_device_context(struct cryp_device_data *device_data,
361 case CRYP_KEY_SIZE_256: 364 case CRYP_KEY_SIZE_256:
362 writel_relaxed(ctx->key_4_l, &reg->key_4_l); 365 writel_relaxed(ctx->key_4_l, &reg->key_4_l);
363 writel_relaxed(ctx->key_4_r, &reg->key_4_r); 366 writel_relaxed(ctx->key_4_r, &reg->key_4_r);
367 /* Fall through */
364 368
365 case CRYP_KEY_SIZE_192: 369 case CRYP_KEY_SIZE_192:
366 writel_relaxed(ctx->key_3_l, &reg->key_3_l); 370 writel_relaxed(ctx->key_3_l, &reg->key_3_l);
367 writel_relaxed(ctx->key_3_r, &reg->key_3_r); 371 writel_relaxed(ctx->key_3_r, &reg->key_3_r);
372 /* Fall through */
368 373
369 case CRYP_KEY_SIZE_128: 374 case CRYP_KEY_SIZE_128:
370 writel_relaxed(ctx->key_2_l, &reg->key_2_l); 375 writel_relaxed(ctx->key_2_l, &reg->key_2_l);
371 writel_relaxed(ctx->key_2_r, &reg->key_2_r); 376 writel_relaxed(ctx->key_2_r, &reg->key_2_r);
377 /* Fall through */
372 378
373 default: 379 default:
374 writel_relaxed(ctx->key_1_l, &reg->key_1_l); 380 writel_relaxed(ctx->key_1_l, &reg->key_1_l);
diff --git a/drivers/dma/dw-edma/dw-edma-core.h b/drivers/dma/dw-edma/dw-edma-core.h
index b6cc90cbc9dc..4e5f9f6e901b 100644
--- a/drivers/dma/dw-edma/dw-edma-core.h
+++ b/drivers/dma/dw-edma/dw-edma-core.h
@@ -50,7 +50,7 @@ struct dw_edma_burst {
50 50
51struct dw_edma_region { 51struct dw_edma_region {
52 phys_addr_t paddr; 52 phys_addr_t paddr;
53 dma_addr_t vaddr; 53 void __iomem *vaddr;
54 size_t sz; 54 size_t sz;
55}; 55};
56 56
diff --git a/drivers/dma/dw-edma/dw-edma-pcie.c b/drivers/dma/dw-edma/dw-edma-pcie.c
index 4c96e1c948f2..dc85f55e1bb8 100644
--- a/drivers/dma/dw-edma/dw-edma-pcie.c
+++ b/drivers/dma/dw-edma/dw-edma-pcie.c
@@ -130,19 +130,19 @@ static int dw_edma_pcie_probe(struct pci_dev *pdev,
130 chip->id = pdev->devfn; 130 chip->id = pdev->devfn;
131 chip->irq = pdev->irq; 131 chip->irq = pdev->irq;
132 132
133 dw->rg_region.vaddr = (dma_addr_t)pcim_iomap_table(pdev)[pdata->rg_bar]; 133 dw->rg_region.vaddr = pcim_iomap_table(pdev)[pdata->rg_bar];
134 dw->rg_region.vaddr += pdata->rg_off; 134 dw->rg_region.vaddr += pdata->rg_off;
135 dw->rg_region.paddr = pdev->resource[pdata->rg_bar].start; 135 dw->rg_region.paddr = pdev->resource[pdata->rg_bar].start;
136 dw->rg_region.paddr += pdata->rg_off; 136 dw->rg_region.paddr += pdata->rg_off;
137 dw->rg_region.sz = pdata->rg_sz; 137 dw->rg_region.sz = pdata->rg_sz;
138 138
139 dw->ll_region.vaddr = (dma_addr_t)pcim_iomap_table(pdev)[pdata->ll_bar]; 139 dw->ll_region.vaddr = pcim_iomap_table(pdev)[pdata->ll_bar];
140 dw->ll_region.vaddr += pdata->ll_off; 140 dw->ll_region.vaddr += pdata->ll_off;
141 dw->ll_region.paddr = pdev->resource[pdata->ll_bar].start; 141 dw->ll_region.paddr = pdev->resource[pdata->ll_bar].start;
142 dw->ll_region.paddr += pdata->ll_off; 142 dw->ll_region.paddr += pdata->ll_off;
143 dw->ll_region.sz = pdata->ll_sz; 143 dw->ll_region.sz = pdata->ll_sz;
144 144
145 dw->dt_region.vaddr = (dma_addr_t)pcim_iomap_table(pdev)[pdata->dt_bar]; 145 dw->dt_region.vaddr = pcim_iomap_table(pdev)[pdata->dt_bar];
146 dw->dt_region.vaddr += pdata->dt_off; 146 dw->dt_region.vaddr += pdata->dt_off;
147 dw->dt_region.paddr = pdev->resource[pdata->dt_bar].start; 147 dw->dt_region.paddr = pdev->resource[pdata->dt_bar].start;
148 dw->dt_region.paddr += pdata->dt_off; 148 dw->dt_region.paddr += pdata->dt_off;
@@ -158,17 +158,17 @@ static int dw_edma_pcie_probe(struct pci_dev *pdev,
158 pci_dbg(pdev, "Mode:\t%s\n", 158 pci_dbg(pdev, "Mode:\t%s\n",
159 dw->mode == EDMA_MODE_LEGACY ? "Legacy" : "Unroll"); 159 dw->mode == EDMA_MODE_LEGACY ? "Legacy" : "Unroll");
160 160
161 pci_dbg(pdev, "Registers:\tBAR=%u, off=0x%.8lx, sz=0x%zx bytes, addr(v=%pa, p=%pa)\n", 161 pci_dbg(pdev, "Registers:\tBAR=%u, off=0x%.8lx, sz=0x%zx bytes, addr(v=%p, p=%pa)\n",
162 pdata->rg_bar, pdata->rg_off, pdata->rg_sz, 162 pdata->rg_bar, pdata->rg_off, pdata->rg_sz,
163 &dw->rg_region.vaddr, &dw->rg_region.paddr); 163 dw->rg_region.vaddr, &dw->rg_region.paddr);
164 164
165 pci_dbg(pdev, "L. List:\tBAR=%u, off=0x%.8lx, sz=0x%zx bytes, addr(v=%pa, p=%pa)\n", 165 pci_dbg(pdev, "L. List:\tBAR=%u, off=0x%.8lx, sz=0x%zx bytes, addr(v=%p, p=%pa)\n",
166 pdata->ll_bar, pdata->ll_off, pdata->ll_sz, 166 pdata->ll_bar, pdata->ll_off, pdata->ll_sz,
167 &dw->ll_region.vaddr, &dw->ll_region.paddr); 167 dw->ll_region.vaddr, &dw->ll_region.paddr);
168 168
169 pci_dbg(pdev, "Data:\tBAR=%u, off=0x%.8lx, sz=0x%zx bytes, addr(v=%pa, p=%pa)\n", 169 pci_dbg(pdev, "Data:\tBAR=%u, off=0x%.8lx, sz=0x%zx bytes, addr(v=%p, p=%pa)\n",
170 pdata->dt_bar, pdata->dt_off, pdata->dt_sz, 170 pdata->dt_bar, pdata->dt_off, pdata->dt_sz,
171 &dw->dt_region.vaddr, &dw->dt_region.paddr); 171 dw->dt_region.vaddr, &dw->dt_region.paddr);
172 172
173 pci_dbg(pdev, "Nr. IRQs:\t%u\n", dw->nr_irqs); 173 pci_dbg(pdev, "Nr. IRQs:\t%u\n", dw->nr_irqs);
174 174
diff --git a/drivers/dma/dw-edma/dw-edma-v0-core.c b/drivers/dma/dw-edma/dw-edma-v0-core.c
index 8a3180ed49a6..692de47b1670 100644
--- a/drivers/dma/dw-edma/dw-edma-v0-core.c
+++ b/drivers/dma/dw-edma/dw-edma-v0-core.c
@@ -25,7 +25,7 @@ enum dw_edma_control {
25 25
26static inline struct dw_edma_v0_regs __iomem *__dw_regs(struct dw_edma *dw) 26static inline struct dw_edma_v0_regs __iomem *__dw_regs(struct dw_edma *dw)
27{ 27{
28 return (struct dw_edma_v0_regs __iomem *)dw->rg_region.vaddr; 28 return dw->rg_region.vaddr;
29} 29}
30 30
31#define SET(dw, name, value) \ 31#define SET(dw, name, value) \
@@ -192,13 +192,12 @@ u32 dw_edma_v0_core_status_abort_int(struct dw_edma *dw, enum dw_edma_dir dir)
192static void dw_edma_v0_core_write_chunk(struct dw_edma_chunk *chunk) 192static void dw_edma_v0_core_write_chunk(struct dw_edma_chunk *chunk)
193{ 193{
194 struct dw_edma_burst *child; 194 struct dw_edma_burst *child;
195 struct dw_edma_v0_lli *lli; 195 struct dw_edma_v0_lli __iomem *lli;
196 struct dw_edma_v0_llp *llp; 196 struct dw_edma_v0_llp __iomem *llp;
197 u32 control = 0, i = 0; 197 u32 control = 0, i = 0;
198 u64 sar, dar, addr;
199 int j; 198 int j;
200 199
201 lli = (struct dw_edma_v0_lli *)chunk->ll_region.vaddr; 200 lli = chunk->ll_region.vaddr;
202 201
203 if (chunk->cb) 202 if (chunk->cb)
204 control = DW_EDMA_V0_CB; 203 control = DW_EDMA_V0_CB;
@@ -214,17 +213,15 @@ static void dw_edma_v0_core_write_chunk(struct dw_edma_chunk *chunk)
214 /* Transfer size */ 213 /* Transfer size */
215 SET_LL(&lli[i].transfer_size, child->sz); 214 SET_LL(&lli[i].transfer_size, child->sz);
216 /* SAR - low, high */ 215 /* SAR - low, high */
217 sar = cpu_to_le64(child->sar); 216 SET_LL(&lli[i].sar_low, lower_32_bits(child->sar));
218 SET_LL(&lli[i].sar_low, lower_32_bits(sar)); 217 SET_LL(&lli[i].sar_high, upper_32_bits(child->sar));
219 SET_LL(&lli[i].sar_high, upper_32_bits(sar));
220 /* DAR - low, high */ 218 /* DAR - low, high */
221 dar = cpu_to_le64(child->dar); 219 SET_LL(&lli[i].dar_low, lower_32_bits(child->dar));
222 SET_LL(&lli[i].dar_low, lower_32_bits(dar)); 220 SET_LL(&lli[i].dar_high, upper_32_bits(child->dar));
223 SET_LL(&lli[i].dar_high, upper_32_bits(dar));
224 i++; 221 i++;
225 } 222 }
226 223
227 llp = (struct dw_edma_v0_llp *)&lli[i]; 224 llp = (void __iomem *)&lli[i];
228 control = DW_EDMA_V0_LLP | DW_EDMA_V0_TCB; 225 control = DW_EDMA_V0_LLP | DW_EDMA_V0_TCB;
229 if (!chunk->cb) 226 if (!chunk->cb)
230 control |= DW_EDMA_V0_CB; 227 control |= DW_EDMA_V0_CB;
@@ -232,9 +229,8 @@ static void dw_edma_v0_core_write_chunk(struct dw_edma_chunk *chunk)
232 /* Channel control */ 229 /* Channel control */
233 SET_LL(&llp->control, control); 230 SET_LL(&llp->control, control);
234 /* Linked list - low, high */ 231 /* Linked list - low, high */
235 addr = cpu_to_le64(chunk->ll_region.paddr); 232 SET_LL(&llp->llp_low, lower_32_bits(chunk->ll_region.paddr));
236 SET_LL(&llp->llp_low, lower_32_bits(addr)); 233 SET_LL(&llp->llp_high, upper_32_bits(chunk->ll_region.paddr));
237 SET_LL(&llp->llp_high, upper_32_bits(addr));
238} 234}
239 235
240void dw_edma_v0_core_start(struct dw_edma_chunk *chunk, bool first) 236void dw_edma_v0_core_start(struct dw_edma_chunk *chunk, bool first)
@@ -242,7 +238,6 @@ void dw_edma_v0_core_start(struct dw_edma_chunk *chunk, bool first)
242 struct dw_edma_chan *chan = chunk->chan; 238 struct dw_edma_chan *chan = chunk->chan;
243 struct dw_edma *dw = chan->chip->dw; 239 struct dw_edma *dw = chan->chip->dw;
244 u32 tmp; 240 u32 tmp;
245 u64 llp;
246 241
247 dw_edma_v0_core_write_chunk(chunk); 242 dw_edma_v0_core_write_chunk(chunk);
248 243
@@ -262,9 +257,10 @@ void dw_edma_v0_core_start(struct dw_edma_chunk *chunk, bool first)
262 SET_CH(dw, chan->dir, chan->id, ch_control1, 257 SET_CH(dw, chan->dir, chan->id, ch_control1,
263 (DW_EDMA_V0_CCS | DW_EDMA_V0_LLE)); 258 (DW_EDMA_V0_CCS | DW_EDMA_V0_LLE));
264 /* Linked list - low, high */ 259 /* Linked list - low, high */
265 llp = cpu_to_le64(chunk->ll_region.paddr); 260 SET_CH(dw, chan->dir, chan->id, llp_low,
266 SET_CH(dw, chan->dir, chan->id, llp_low, lower_32_bits(llp)); 261 lower_32_bits(chunk->ll_region.paddr));
267 SET_CH(dw, chan->dir, chan->id, llp_high, upper_32_bits(llp)); 262 SET_CH(dw, chan->dir, chan->id, llp_high,
263 upper_32_bits(chunk->ll_region.paddr));
268 } 264 }
269 /* Doorbell */ 265 /* Doorbell */
270 SET_RW(dw, chan->dir, doorbell, 266 SET_RW(dw, chan->dir, doorbell,
diff --git a/drivers/dma/dw-edma/dw-edma-v0-debugfs.c b/drivers/dma/dw-edma/dw-edma-v0-debugfs.c
index 3226f528cc11..42739508c0d8 100644
--- a/drivers/dma/dw-edma/dw-edma-v0-debugfs.c
+++ b/drivers/dma/dw-edma/dw-edma-v0-debugfs.c
@@ -14,7 +14,7 @@
14#include "dw-edma-core.h" 14#include "dw-edma-core.h"
15 15
16#define REGS_ADDR(name) \ 16#define REGS_ADDR(name) \
17 ((dma_addr_t *)&regs->name) 17 ((void __force *)&regs->name)
18#define REGISTER(name) \ 18#define REGISTER(name) \
19 { #name, REGS_ADDR(name) } 19 { #name, REGS_ADDR(name) }
20 20
@@ -40,36 +40,37 @@
40 40
41static struct dentry *base_dir; 41static struct dentry *base_dir;
42static struct dw_edma *dw; 42static struct dw_edma *dw;
43static struct dw_edma_v0_regs *regs; 43static struct dw_edma_v0_regs __iomem *regs;
44 44
45static struct { 45static struct {
46 void *start; 46 void __iomem *start;
47 void *end; 47 void __iomem *end;
48} lim[2][EDMA_V0_MAX_NR_CH]; 48} lim[2][EDMA_V0_MAX_NR_CH];
49 49
50struct debugfs_entries { 50struct debugfs_entries {
51 char name[24]; 51 const char *name;
52 dma_addr_t *reg; 52 dma_addr_t *reg;
53}; 53};
54 54
55static int dw_edma_debugfs_u32_get(void *data, u64 *val) 55static int dw_edma_debugfs_u32_get(void *data, u64 *val)
56{ 56{
57 void __iomem *reg = (void __force __iomem *)data;
57 if (dw->mode == EDMA_MODE_LEGACY && 58 if (dw->mode == EDMA_MODE_LEGACY &&
58 data >= (void *)&regs->type.legacy.ch) { 59 reg >= (void __iomem *)&regs->type.legacy.ch) {
59 void *ptr = (void *)&regs->type.legacy.ch; 60 void __iomem *ptr = &regs->type.legacy.ch;
60 u32 viewport_sel = 0; 61 u32 viewport_sel = 0;
61 unsigned long flags; 62 unsigned long flags;
62 u16 ch; 63 u16 ch;
63 64
64 for (ch = 0; ch < dw->wr_ch_cnt; ch++) 65 for (ch = 0; ch < dw->wr_ch_cnt; ch++)
65 if (lim[0][ch].start >= data && data < lim[0][ch].end) { 66 if (lim[0][ch].start >= reg && reg < lim[0][ch].end) {
66 ptr += (data - lim[0][ch].start); 67 ptr += (reg - lim[0][ch].start);
67 goto legacy_sel_wr; 68 goto legacy_sel_wr;
68 } 69 }
69 70
70 for (ch = 0; ch < dw->rd_ch_cnt; ch++) 71 for (ch = 0; ch < dw->rd_ch_cnt; ch++)
71 if (lim[1][ch].start >= data && data < lim[1][ch].end) { 72 if (lim[1][ch].start >= reg && reg < lim[1][ch].end) {
72 ptr += (data - lim[1][ch].start); 73 ptr += (reg - lim[1][ch].start);
73 goto legacy_sel_rd; 74 goto legacy_sel_rd;
74 } 75 }
75 76
@@ -86,7 +87,7 @@ legacy_sel_wr:
86 87
87 raw_spin_unlock_irqrestore(&dw->lock, flags); 88 raw_spin_unlock_irqrestore(&dw->lock, flags);
88 } else { 89 } else {
89 *val = readl(data); 90 *val = readl(reg);
90 } 91 }
91 92
92 return 0; 93 return 0;
@@ -105,7 +106,7 @@ static void dw_edma_debugfs_create_x32(const struct debugfs_entries entries[],
105 } 106 }
106} 107}
107 108
108static void dw_edma_debugfs_regs_ch(struct dw_edma_v0_ch_regs *regs, 109static void dw_edma_debugfs_regs_ch(struct dw_edma_v0_ch_regs __iomem *regs,
109 struct dentry *dir) 110 struct dentry *dir)
110{ 111{
111 int nr_entries; 112 int nr_entries;
@@ -288,7 +289,7 @@ void dw_edma_v0_debugfs_on(struct dw_edma_chip *chip)
288 if (!dw) 289 if (!dw)
289 return; 290 return;
290 291
291 regs = (struct dw_edma_v0_regs *)dw->rg_region.vaddr; 292 regs = dw->rg_region.vaddr;
292 if (!regs) 293 if (!regs)
293 return; 294 return;
294 295
diff --git a/drivers/dma/fsldma.c b/drivers/dma/fsldma.c
index 23e0a356f167..ad72b3f42ffa 100644
--- a/drivers/dma/fsldma.c
+++ b/drivers/dma/fsldma.c
@@ -1163,6 +1163,7 @@ static int fsl_dma_chan_probe(struct fsldma_device *fdev,
1163 switch (chan->feature & FSL_DMA_IP_MASK) { 1163 switch (chan->feature & FSL_DMA_IP_MASK) {
1164 case FSL_DMA_IP_85XX: 1164 case FSL_DMA_IP_85XX:
1165 chan->toggle_ext_pause = fsl_chan_toggle_ext_pause; 1165 chan->toggle_ext_pause = fsl_chan_toggle_ext_pause;
1166 /* Fall through */
1166 case FSL_DMA_IP_83XX: 1167 case FSL_DMA_IP_83XX:
1167 chan->toggle_ext_start = fsl_chan_toggle_ext_start; 1168 chan->toggle_ext_start = fsl_chan_toggle_ext_start;
1168 chan->set_src_loop_size = fsl_chan_set_src_loop_size; 1169 chan->set_src_loop_size = fsl_chan_set_src_loop_size;
diff --git a/drivers/dma/ste_dma40.c b/drivers/dma/ste_dma40.c
index 89d710899010..de8bfd9a76e9 100644
--- a/drivers/dma/ste_dma40.c
+++ b/drivers/dma/ste_dma40.c
@@ -142,7 +142,7 @@ enum d40_events {
142 * when the DMA hw is powered off. 142 * when the DMA hw is powered off.
143 * TODO: Add save/restore of D40_DREG_GCC on dma40 v3 or later, if that works. 143 * TODO: Add save/restore of D40_DREG_GCC on dma40 v3 or later, if that works.
144 */ 144 */
145static u32 d40_backup_regs[] = { 145static __maybe_unused u32 d40_backup_regs[] = {
146 D40_DREG_LCPA, 146 D40_DREG_LCPA,
147 D40_DREG_LCLA, 147 D40_DREG_LCLA,
148 D40_DREG_PRMSE, 148 D40_DREG_PRMSE,
@@ -211,7 +211,7 @@ static u32 d40_backup_regs_v4b[] = {
211 211
212#define BACKUP_REGS_SZ_V4B ARRAY_SIZE(d40_backup_regs_v4b) 212#define BACKUP_REGS_SZ_V4B ARRAY_SIZE(d40_backup_regs_v4b)
213 213
214static u32 d40_backup_regs_chan[] = { 214static __maybe_unused u32 d40_backup_regs_chan[] = {
215 D40_CHAN_REG_SSCFG, 215 D40_CHAN_REG_SSCFG,
216 D40_CHAN_REG_SSELT, 216 D40_CHAN_REG_SSELT,
217 D40_CHAN_REG_SSPTR, 217 D40_CHAN_REG_SSPTR,
diff --git a/drivers/dma/stm32-mdma.c b/drivers/dma/stm32-mdma.c
index d6e919d3936a..1311de74bfdd 100644
--- a/drivers/dma/stm32-mdma.c
+++ b/drivers/dma/stm32-mdma.c
@@ -1366,7 +1366,7 @@ static irqreturn_t stm32_mdma_irq_handler(int irq, void *devid)
1366 1366
1367 chan = &dmadev->chan[id]; 1367 chan = &dmadev->chan[id];
1368 if (!chan) { 1368 if (!chan) {
1369 dev_err(chan2dev(chan), "MDMA channel not initialized\n"); 1369 dev_dbg(mdma2dev(dmadev), "MDMA channel not initialized\n");
1370 goto exit; 1370 goto exit;
1371 } 1371 }
1372 1372
diff --git a/drivers/dma/tegra210-adma.c b/drivers/dma/tegra210-adma.c
index 2805853e963f..b33cf6e8ab8e 100644
--- a/drivers/dma/tegra210-adma.c
+++ b/drivers/dma/tegra210-adma.c
@@ -712,7 +712,7 @@ static struct dma_chan *tegra_dma_of_xlate(struct of_phandle_args *dma_spec,
712 return chan; 712 return chan;
713} 713}
714 714
715static int tegra_adma_runtime_suspend(struct device *dev) 715static int __maybe_unused tegra_adma_runtime_suspend(struct device *dev)
716{ 716{
717 struct tegra_adma *tdma = dev_get_drvdata(dev); 717 struct tegra_adma *tdma = dev_get_drvdata(dev);
718 struct tegra_adma_chan_regs *ch_reg; 718 struct tegra_adma_chan_regs *ch_reg;
@@ -744,7 +744,7 @@ clk_disable:
744 return 0; 744 return 0;
745} 745}
746 746
747static int tegra_adma_runtime_resume(struct device *dev) 747static int __maybe_unused tegra_adma_runtime_resume(struct device *dev)
748{ 748{
749 struct tegra_adma *tdma = dev_get_drvdata(dev); 749 struct tegra_adma *tdma = dev_get_drvdata(dev);
750 struct tegra_adma_chan_regs *ch_reg; 750 struct tegra_adma_chan_regs *ch_reg;
diff --git a/drivers/dma/ti/omap-dma.c b/drivers/dma/ti/omap-dma.c
index ba2489d4ea24..ba27802efcd0 100644
--- a/drivers/dma/ti/omap-dma.c
+++ b/drivers/dma/ti/omap-dma.c
@@ -1234,7 +1234,7 @@ static struct dma_async_tx_descriptor *omap_dma_prep_dma_interleaved(
1234 if (src_icg) { 1234 if (src_icg) {
1235 d->ccr |= CCR_SRC_AMODE_DBLIDX; 1235 d->ccr |= CCR_SRC_AMODE_DBLIDX;
1236 d->ei = 1; 1236 d->ei = 1;
1237 d->fi = src_icg; 1237 d->fi = src_icg + 1;
1238 } else if (xt->src_inc) { 1238 } else if (xt->src_inc) {
1239 d->ccr |= CCR_SRC_AMODE_POSTINC; 1239 d->ccr |= CCR_SRC_AMODE_POSTINC;
1240 d->fi = 0; 1240 d->fi = 0;
@@ -1249,7 +1249,7 @@ static struct dma_async_tx_descriptor *omap_dma_prep_dma_interleaved(
1249 if (dst_icg) { 1249 if (dst_icg) {
1250 d->ccr |= CCR_DST_AMODE_DBLIDX; 1250 d->ccr |= CCR_DST_AMODE_DBLIDX;
1251 sg->ei = 1; 1251 sg->ei = 1;
1252 sg->fi = dst_icg; 1252 sg->fi = dst_icg + 1;
1253 } else if (xt->dst_inc) { 1253 } else if (xt->dst_inc) {
1254 d->ccr |= CCR_DST_AMODE_POSTINC; 1254 d->ccr |= CCR_DST_AMODE_POSTINC;
1255 sg->fi = 0; 1255 sg->fi = 0;
diff --git a/drivers/firewire/core-device.c b/drivers/firewire/core-device.c
index 3dc1cbf849db..b785e936244f 100644
--- a/drivers/firewire/core-device.c
+++ b/drivers/firewire/core-device.c
@@ -957,7 +957,7 @@ static void set_broadcast_channel(struct fw_device *device, int generation)
957 device->bc_implemented = BC_IMPLEMENTED; 957 device->bc_implemented = BC_IMPLEMENTED;
958 break; 958 break;
959 } 959 }
960 /* else fall through to case address error */ 960 /* else, fall through - to case address error */
961 case RCODE_ADDRESS_ERROR: 961 case RCODE_ADDRESS_ERROR:
962 device->bc_implemented = BC_UNIMPLEMENTED; 962 device->bc_implemented = BC_UNIMPLEMENTED;
963 } 963 }
diff --git a/drivers/firewire/core-iso.c b/drivers/firewire/core-iso.c
index 42566b7be8f5..df8a56a979b9 100644
--- a/drivers/firewire/core-iso.c
+++ b/drivers/firewire/core-iso.c
@@ -284,7 +284,7 @@ static int manage_channel(struct fw_card *card, int irm_id, int generation,
284 if ((data[0] & bit) == (data[1] & bit)) 284 if ((data[0] & bit) == (data[1] & bit))
285 continue; 285 continue;
286 286
287 /* 1394-1995 IRM, fall through to retry. */ 287 /* fall through - It's a 1394-1995 IRM, retry. */
288 default: 288 default:
289 if (retry) { 289 if (retry) {
290 retry--; 290 retry--;
diff --git a/drivers/firewire/core-topology.c b/drivers/firewire/core-topology.c
index 46bd22dde535..94a13fca8267 100644
--- a/drivers/firewire/core-topology.c
+++ b/drivers/firewire/core-topology.c
@@ -54,6 +54,7 @@ static u32 *count_ports(u32 *sid, int *total_port_count, int *child_port_count)
54 switch (port_type) { 54 switch (port_type) {
55 case SELFID_PORT_CHILD: 55 case SELFID_PORT_CHILD:
56 (*child_port_count)++; 56 (*child_port_count)++;
57 /* fall through */
57 case SELFID_PORT_PARENT: 58 case SELFID_PORT_PARENT:
58 case SELFID_PORT_NCONN: 59 case SELFID_PORT_NCONN:
59 (*total_port_count)++; 60 (*total_port_count)++;
diff --git a/drivers/firmware/Kconfig b/drivers/firmware/Kconfig
index 53446e39a32c..ba8d3d0ef32c 100644
--- a/drivers/firmware/Kconfig
+++ b/drivers/firmware/Kconfig
@@ -157,7 +157,7 @@ config DMI_SCAN_MACHINE_NON_EFI_FALLBACK
157 157
158config ISCSI_IBFT_FIND 158config ISCSI_IBFT_FIND
159 bool "iSCSI Boot Firmware Table Attributes" 159 bool "iSCSI Boot Firmware Table Attributes"
160 depends on X86 && ACPI 160 depends on X86 && ISCSI_IBFT
161 default n 161 default n
162 help 162 help
163 This option enables the kernel to find the region of memory 163 This option enables the kernel to find the region of memory
@@ -168,7 +168,8 @@ config ISCSI_IBFT_FIND
168config ISCSI_IBFT 168config ISCSI_IBFT
169 tristate "iSCSI Boot Firmware Table Attributes module" 169 tristate "iSCSI Boot Firmware Table Attributes module"
170 select ISCSI_BOOT_SYSFS 170 select ISCSI_BOOT_SYSFS
171 depends on ISCSI_IBFT_FIND && SCSI && SCSI_LOWLEVEL 171 select ISCSI_IBFT_FIND if X86
172 depends on ACPI && SCSI && SCSI_LOWLEVEL
172 default n 173 default n
173 help 174 help
174 This option enables support for detection and exposing of iSCSI 175 This option enables support for detection and exposing of iSCSI
diff --git a/drivers/firmware/efi/libstub/efi-stub-helper.c b/drivers/firmware/efi/libstub/efi-stub-helper.c
index 1db780c0f07b..3caae7f2cf56 100644
--- a/drivers/firmware/efi/libstub/efi-stub-helper.c
+++ b/drivers/firmware/efi/libstub/efi-stub-helper.c
@@ -927,17 +927,33 @@ fail:
927 return status; 927 return status;
928} 928}
929 929
930#define GET_EFI_CONFIG_TABLE(bits) \
931static void *get_efi_config_table##bits(efi_system_table_t *_sys_table, \
932 efi_guid_t guid) \
933{ \
934 efi_system_table_##bits##_t *sys_table; \
935 efi_config_table_##bits##_t *tables; \
936 int i; \
937 \
938 sys_table = (typeof(sys_table))_sys_table; \
939 tables = (typeof(tables))(unsigned long)sys_table->tables; \
940 \
941 for (i = 0; i < sys_table->nr_tables; i++) { \
942 if (efi_guidcmp(tables[i].guid, guid) != 0) \
943 continue; \
944 \
945 return (void *)(unsigned long)tables[i].table; \
946 } \
947 \
948 return NULL; \
949}
950GET_EFI_CONFIG_TABLE(32)
951GET_EFI_CONFIG_TABLE(64)
952
930void *get_efi_config_table(efi_system_table_t *sys_table, efi_guid_t guid) 953void *get_efi_config_table(efi_system_table_t *sys_table, efi_guid_t guid)
931{ 954{
932 efi_config_table_t *tables = (efi_config_table_t *)sys_table->tables; 955 if (efi_is_64bit())
933 int i; 956 return get_efi_config_table64(sys_table, guid);
934 957 else
935 for (i = 0; i < sys_table->nr_tables; i++) { 958 return get_efi_config_table32(sys_table, guid);
936 if (efi_guidcmp(tables[i].guid, guid) != 0)
937 continue;
938
939 return (void *)tables[i].table;
940 }
941
942 return NULL;
943} 959}
diff --git a/drivers/firmware/iscsi_ibft.c b/drivers/firmware/iscsi_ibft.c
index ab3aa3983833..7e12cbdf957c 100644
--- a/drivers/firmware/iscsi_ibft.c
+++ b/drivers/firmware/iscsi_ibft.c
@@ -84,6 +84,10 @@ MODULE_DESCRIPTION("sysfs interface to BIOS iBFT information");
84MODULE_LICENSE("GPL"); 84MODULE_LICENSE("GPL");
85MODULE_VERSION(IBFT_ISCSI_VERSION); 85MODULE_VERSION(IBFT_ISCSI_VERSION);
86 86
87#ifndef CONFIG_ISCSI_IBFT_FIND
88struct acpi_table_ibft *ibft_addr;
89#endif
90
87struct ibft_hdr { 91struct ibft_hdr {
88 u8 id; 92 u8 id;
89 u8 version; 93 u8 version;
diff --git a/drivers/fpga/Kconfig b/drivers/fpga/Kconfig
index 474f304ec109..cdd4f73b4869 100644
--- a/drivers/fpga/Kconfig
+++ b/drivers/fpga/Kconfig
@@ -40,6 +40,7 @@ config ALTERA_PR_IP_CORE_PLAT
40config FPGA_MGR_ALTERA_PS_SPI 40config FPGA_MGR_ALTERA_PS_SPI
41 tristate "Altera FPGA Passive Serial over SPI" 41 tristate "Altera FPGA Passive Serial over SPI"
42 depends on SPI 42 depends on SPI
43 select BITREVERSE
43 help 44 help
44 FPGA manager driver support for Altera Arria/Cyclone/Stratix 45 FPGA manager driver support for Altera Arria/Cyclone/Stratix
45 using the passive serial interface over SPI. 46 using the passive serial interface over SPI.
diff --git a/drivers/fpga/altera-ps-spi.c b/drivers/fpga/altera-ps-spi.c
index a13f224303c6..0221dee8dd4c 100644
--- a/drivers/fpga/altera-ps-spi.c
+++ b/drivers/fpga/altera-ps-spi.c
@@ -210,7 +210,7 @@ static int altera_ps_write_complete(struct fpga_manager *mgr,
210 return -EIO; 210 return -EIO;
211 } 211 }
212 212
213 if (!IS_ERR(conf->confd)) { 213 if (conf->confd) {
214 if (!gpiod_get_raw_value_cansleep(conf->confd)) { 214 if (!gpiod_get_raw_value_cansleep(conf->confd)) {
215 dev_err(&mgr->dev, "CONF_DONE is inactive!\n"); 215 dev_err(&mgr->dev, "CONF_DONE is inactive!\n");
216 return -EIO; 216 return -EIO;
@@ -289,10 +289,13 @@ static int altera_ps_probe(struct spi_device *spi)
289 return PTR_ERR(conf->status); 289 return PTR_ERR(conf->status);
290 } 290 }
291 291
292 conf->confd = devm_gpiod_get(&spi->dev, "confd", GPIOD_IN); 292 conf->confd = devm_gpiod_get_optional(&spi->dev, "confd", GPIOD_IN);
293 if (IS_ERR(conf->confd)) { 293 if (IS_ERR(conf->confd)) {
294 dev_warn(&spi->dev, "Not using confd gpio: %ld\n", 294 dev_err(&spi->dev, "Failed to get confd gpio: %ld\n",
295 PTR_ERR(conf->confd)); 295 PTR_ERR(conf->confd));
296 return PTR_ERR(conf->confd);
297 } else if (!conf->confd) {
298 dev_warn(&spi->dev, "Not using confd gpio");
296 } 299 }
297 300
298 /* Register manager with unique name */ 301 /* Register manager with unique name */
diff --git a/drivers/fsi/fsi-scom.c b/drivers/fsi/fsi-scom.c
index 343153d47e5b..004dc03ccf09 100644
--- a/drivers/fsi/fsi-scom.c
+++ b/drivers/fsi/fsi-scom.c
@@ -38,8 +38,7 @@
38#define SCOM_STATUS_PIB_RESP_MASK 0x00007000 38#define SCOM_STATUS_PIB_RESP_MASK 0x00007000
39#define SCOM_STATUS_PIB_RESP_SHIFT 12 39#define SCOM_STATUS_PIB_RESP_SHIFT 12
40 40
41#define SCOM_STATUS_ANY_ERR (SCOM_STATUS_ERR_SUMMARY | \ 41#define SCOM_STATUS_ANY_ERR (SCOM_STATUS_PROTECTION | \
42 SCOM_STATUS_PROTECTION | \
43 SCOM_STATUS_PARITY | \ 42 SCOM_STATUS_PARITY | \
44 SCOM_STATUS_PIB_ABORT | \ 43 SCOM_STATUS_PIB_ABORT | \
45 SCOM_STATUS_PIB_RESP_MASK) 44 SCOM_STATUS_PIB_RESP_MASK)
@@ -251,11 +250,6 @@ static int handle_fsi2pib_status(struct scom_device *scom, uint32_t status)
251 /* Return -EBUSY on PIB abort to force a retry */ 250 /* Return -EBUSY on PIB abort to force a retry */
252 if (status & SCOM_STATUS_PIB_ABORT) 251 if (status & SCOM_STATUS_PIB_ABORT)
253 return -EBUSY; 252 return -EBUSY;
254 if (status & SCOM_STATUS_ERR_SUMMARY) {
255 fsi_device_write(scom->fsi_dev, SCOM_FSI2PIB_RESET_REG, &dummy,
256 sizeof(uint32_t));
257 return -EIO;
258 }
259 return 0; 253 return 0;
260} 254}
261 255
diff --git a/drivers/gpio/gpiolib-of.c b/drivers/gpio/gpiolib-of.c
index 567fb98c0892..9762dd6d99fa 100644
--- a/drivers/gpio/gpiolib-of.c
+++ b/drivers/gpio/gpiolib-of.c
@@ -363,7 +363,7 @@ struct gpio_desc *of_find_gpio(struct device *dev, const char *con_id,
363 /* Special handling for SPI GPIOs if used */ 363 /* Special handling for SPI GPIOs if used */
364 if (IS_ERR(desc)) 364 if (IS_ERR(desc))
365 desc = of_find_spi_gpio(dev, con_id, &of_flags); 365 desc = of_find_spi_gpio(dev, con_id, &of_flags);
366 if (IS_ERR(desc)) { 366 if (IS_ERR(desc) && PTR_ERR(desc) != -EPROBE_DEFER) {
367 /* This quirk looks up flags and all */ 367 /* This quirk looks up flags and all */
368 desc = of_find_spi_cs_gpio(dev, con_id, idx, flags); 368 desc = of_find_spi_cs_gpio(dev, con_id, idx, flags);
369 if (!IS_ERR(desc)) 369 if (!IS_ERR(desc))
diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
index 3ee99d070608..cca749010cd0 100644
--- a/drivers/gpio/gpiolib.c
+++ b/drivers/gpio/gpiolib.c
@@ -956,9 +956,11 @@ static int lineevent_create(struct gpio_device *gdev, void __user *ip)
956 } 956 }
957 957
958 if (eflags & GPIOEVENT_REQUEST_RISING_EDGE) 958 if (eflags & GPIOEVENT_REQUEST_RISING_EDGE)
959 irqflags |= IRQF_TRIGGER_RISING; 959 irqflags |= test_bit(FLAG_ACTIVE_LOW, &desc->flags) ?
960 IRQF_TRIGGER_FALLING : IRQF_TRIGGER_RISING;
960 if (eflags & GPIOEVENT_REQUEST_FALLING_EDGE) 961 if (eflags & GPIOEVENT_REQUEST_FALLING_EDGE)
961 irqflags |= IRQF_TRIGGER_FALLING; 962 irqflags |= test_bit(FLAG_ACTIVE_LOW, &desc->flags) ?
963 IRQF_TRIGGER_RISING : IRQF_TRIGGER_FALLING;
962 irqflags |= IRQF_ONESHOT; 964 irqflags |= IRQF_ONESHOT;
963 965
964 INIT_KFIFO(le->events); 966 INIT_KFIFO(le->events);
@@ -1089,9 +1091,11 @@ static long gpio_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
1089 if (test_bit(FLAG_ACTIVE_LOW, &desc->flags)) 1091 if (test_bit(FLAG_ACTIVE_LOW, &desc->flags))
1090 lineinfo.flags |= GPIOLINE_FLAG_ACTIVE_LOW; 1092 lineinfo.flags |= GPIOLINE_FLAG_ACTIVE_LOW;
1091 if (test_bit(FLAG_OPEN_DRAIN, &desc->flags)) 1093 if (test_bit(FLAG_OPEN_DRAIN, &desc->flags))
1092 lineinfo.flags |= GPIOLINE_FLAG_OPEN_DRAIN; 1094 lineinfo.flags |= (GPIOLINE_FLAG_OPEN_DRAIN |
1095 GPIOLINE_FLAG_IS_OUT);
1093 if (test_bit(FLAG_OPEN_SOURCE, &desc->flags)) 1096 if (test_bit(FLAG_OPEN_SOURCE, &desc->flags))
1094 lineinfo.flags |= GPIOLINE_FLAG_OPEN_SOURCE; 1097 lineinfo.flags |= (GPIOLINE_FLAG_OPEN_SOURCE |
1098 GPIOLINE_FLAG_IS_OUT);
1095 1099
1096 if (copy_to_user(ip, &lineinfo, sizeof(lineinfo))) 1100 if (copy_to_user(ip, &lineinfo, sizeof(lineinfo)))
1097 return -EFAULT; 1101 return -EFAULT;
@@ -1369,21 +1373,13 @@ int gpiochip_add_data_with_key(struct gpio_chip *chip, void *data,
1369 if (status) 1373 if (status)
1370 goto err_remove_from_list; 1374 goto err_remove_from_list;
1371 1375
1372 status = gpiochip_irqchip_init_valid_mask(chip);
1373 if (status)
1374 goto err_remove_from_list;
1375
1376 status = gpiochip_alloc_valid_mask(chip); 1376 status = gpiochip_alloc_valid_mask(chip);
1377 if (status) 1377 if (status)
1378 goto err_remove_irqchip_mask; 1378 goto err_remove_from_list;
1379
1380 status = gpiochip_add_irqchip(chip, lock_key, request_key);
1381 if (status)
1382 goto err_free_gpiochip_mask;
1383 1379
1384 status = of_gpiochip_add(chip); 1380 status = of_gpiochip_add(chip);
1385 if (status) 1381 if (status)
1386 goto err_remove_chip; 1382 goto err_free_gpiochip_mask;
1387 1383
1388 status = gpiochip_init_valid_mask(chip); 1384 status = gpiochip_init_valid_mask(chip);
1389 if (status) 1385 if (status)
@@ -1392,18 +1388,31 @@ int gpiochip_add_data_with_key(struct gpio_chip *chip, void *data,
1392 for (i = 0; i < chip->ngpio; i++) { 1388 for (i = 0; i < chip->ngpio; i++) {
1393 struct gpio_desc *desc = &gdev->descs[i]; 1389 struct gpio_desc *desc = &gdev->descs[i];
1394 1390
1395 if (chip->get_direction && gpiochip_line_is_valid(chip, i)) 1391 if (chip->get_direction && gpiochip_line_is_valid(chip, i)) {
1396 desc->flags = !chip->get_direction(chip, i) ? 1392 if (!chip->get_direction(chip, i))
1397 (1 << FLAG_IS_OUT) : 0; 1393 set_bit(FLAG_IS_OUT, &desc->flags);
1398 else 1394 else
1399 desc->flags = !chip->direction_input ? 1395 clear_bit(FLAG_IS_OUT, &desc->flags);
1400 (1 << FLAG_IS_OUT) : 0; 1396 } else {
1397 if (!chip->direction_input)
1398 set_bit(FLAG_IS_OUT, &desc->flags);
1399 else
1400 clear_bit(FLAG_IS_OUT, &desc->flags);
1401 }
1401 } 1402 }
1402 1403
1403 acpi_gpiochip_add(chip); 1404 acpi_gpiochip_add(chip);
1404 1405
1405 machine_gpiochip_add(chip); 1406 machine_gpiochip_add(chip);
1406 1407
1408 status = gpiochip_irqchip_init_valid_mask(chip);
1409 if (status)
1410 goto err_remove_acpi_chip;
1411
1412 status = gpiochip_add_irqchip(chip, lock_key, request_key);
1413 if (status)
1414 goto err_remove_irqchip_mask;
1415
1407 /* 1416 /*
1408 * By first adding the chardev, and then adding the device, 1417 * By first adding the chardev, and then adding the device,
1409 * we get a device node entry in sysfs under 1418 * we get a device node entry in sysfs under
@@ -1415,21 +1424,21 @@ int gpiochip_add_data_with_key(struct gpio_chip *chip, void *data,
1415 if (gpiolib_initialized) { 1424 if (gpiolib_initialized) {
1416 status = gpiochip_setup_dev(gdev); 1425 status = gpiochip_setup_dev(gdev);
1417 if (status) 1426 if (status)
1418 goto err_remove_acpi_chip; 1427 goto err_remove_irqchip;
1419 } 1428 }
1420 return 0; 1429 return 0;
1421 1430
1431err_remove_irqchip:
1432 gpiochip_irqchip_remove(chip);
1433err_remove_irqchip_mask:
1434 gpiochip_irqchip_free_valid_mask(chip);
1422err_remove_acpi_chip: 1435err_remove_acpi_chip:
1423 acpi_gpiochip_remove(chip); 1436 acpi_gpiochip_remove(chip);
1424err_remove_of_chip: 1437err_remove_of_chip:
1425 gpiochip_free_hogs(chip); 1438 gpiochip_free_hogs(chip);
1426 of_gpiochip_remove(chip); 1439 of_gpiochip_remove(chip);
1427err_remove_chip:
1428 gpiochip_irqchip_remove(chip);
1429err_free_gpiochip_mask: 1440err_free_gpiochip_mask:
1430 gpiochip_free_valid_mask(chip); 1441 gpiochip_free_valid_mask(chip);
1431err_remove_irqchip_mask:
1432 gpiochip_irqchip_free_valid_mask(chip);
1433err_remove_from_list: 1442err_remove_from_list:
1434 spin_lock_irqsave(&gpio_lock, flags); 1443 spin_lock_irqsave(&gpio_lock, flags);
1435 list_del(&gdev->list); 1444 list_del(&gdev->list);
diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
index 1d80222587ad..3c88420e3497 100644
--- a/drivers/gpu/drm/Kconfig
+++ b/drivers/gpu/drm/Kconfig
@@ -394,7 +394,7 @@ config DRM_R128
394config DRM_I810 394config DRM_I810
395 tristate "Intel I810" 395 tristate "Intel I810"
396 # !PREEMPT because of missing ioctl locking 396 # !PREEMPT because of missing ioctl locking
397 depends on DRM && AGP && AGP_INTEL && (!PREEMPT || BROKEN) 397 depends on DRM && AGP && AGP_INTEL && (!PREEMPTION || BROKEN)
398 help 398 help
399 Choose this option if you have an Intel I810 graphics card. If M is 399 Choose this option if you have an Intel I810 graphics card. If M is
400 selected, the module will be called i810. AGP support is required 400 selected, the module will be called i810. AGP support is required
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
index 1d3ee9c42f7e..6a5c96e519b1 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
@@ -1140,7 +1140,8 @@ int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
1140 adev->asic_type != CHIP_FIJI && 1140 adev->asic_type != CHIP_FIJI &&
1141 adev->asic_type != CHIP_POLARIS10 && 1141 adev->asic_type != CHIP_POLARIS10 &&
1142 adev->asic_type != CHIP_POLARIS11 && 1142 adev->asic_type != CHIP_POLARIS11 &&
1143 adev->asic_type != CHIP_POLARIS12) ? 1143 adev->asic_type != CHIP_POLARIS12 &&
1144 adev->asic_type != CHIP_VEGAM) ?
1144 VI_BO_SIZE_ALIGN : 1; 1145 VI_BO_SIZE_ALIGN : 1;
1145 1146
1146 mapping_flags = AMDGPU_VM_PAGE_READABLE; 1147 mapping_flags = AMDGPU_VM_PAGE_READABLE;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
index 9b384a94d2f3..3e35a8f2c5e5 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
@@ -574,6 +574,7 @@ static const struct amdgpu_px_quirk amdgpu_px_quirk_list[] = {
574 { 0x1002, 0x6900, 0x1002, 0x0124, AMDGPU_PX_QUIRK_FORCE_ATPX }, 574 { 0x1002, 0x6900, 0x1002, 0x0124, AMDGPU_PX_QUIRK_FORCE_ATPX },
575 { 0x1002, 0x6900, 0x1028, 0x0812, AMDGPU_PX_QUIRK_FORCE_ATPX }, 575 { 0x1002, 0x6900, 0x1028, 0x0812, AMDGPU_PX_QUIRK_FORCE_ATPX },
576 { 0x1002, 0x6900, 0x1028, 0x0813, AMDGPU_PX_QUIRK_FORCE_ATPX }, 576 { 0x1002, 0x6900, 0x1028, 0x0813, AMDGPU_PX_QUIRK_FORCE_ATPX },
577 { 0x1002, 0x699f, 0x1028, 0x0814, AMDGPU_PX_QUIRK_FORCE_ATPX },
577 { 0x1002, 0x6900, 0x1025, 0x125A, AMDGPU_PX_QUIRK_FORCE_ATPX }, 578 { 0x1002, 0x6900, 0x1025, 0x125A, AMDGPU_PX_QUIRK_FORCE_ATPX },
578 { 0x1002, 0x6900, 0x17AA, 0x3806, AMDGPU_PX_QUIRK_FORCE_ATPX }, 579 { 0x1002, 0x6900, 0x17AA, 0x3806, AMDGPU_PX_QUIRK_FORCE_ATPX },
579 { 0, 0, 0, 0, 0 }, 580 { 0, 0, 0, 0, 0 },
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index e069de8b54e6..8b26c970a3cb 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -1044,29 +1044,27 @@ static int amdgpu_cs_process_fence_dep(struct amdgpu_cs_parser *p,
1044 return r; 1044 return r;
1045 } 1045 }
1046 1046
1047 fence = amdgpu_ctx_get_fence(ctx, entity, 1047 fence = amdgpu_ctx_get_fence(ctx, entity, deps[i].handle);
1048 deps[i].handle); 1048 amdgpu_ctx_put(ctx);
1049
1050 if (IS_ERR(fence))
1051 return PTR_ERR(fence);
1052 else if (!fence)
1053 continue;
1049 1054
1050 if (chunk->chunk_id == AMDGPU_CHUNK_ID_SCHEDULED_DEPENDENCIES) { 1055 if (chunk->chunk_id == AMDGPU_CHUNK_ID_SCHEDULED_DEPENDENCIES) {
1051 struct drm_sched_fence *s_fence = to_drm_sched_fence(fence); 1056 struct drm_sched_fence *s_fence;
1052 struct dma_fence *old = fence; 1057 struct dma_fence *old = fence;
1053 1058
1059 s_fence = to_drm_sched_fence(fence);
1054 fence = dma_fence_get(&s_fence->scheduled); 1060 fence = dma_fence_get(&s_fence->scheduled);
1055 dma_fence_put(old); 1061 dma_fence_put(old);
1056 } 1062 }
1057 1063
1058 if (IS_ERR(fence)) { 1064 r = amdgpu_sync_fence(p->adev, &p->job->sync, fence, true);
1059 r = PTR_ERR(fence); 1065 dma_fence_put(fence);
1060 amdgpu_ctx_put(ctx); 1066 if (r)
1061 return r; 1067 return r;
1062 } else if (fence) {
1063 r = amdgpu_sync_fence(p->adev, &p->job->sync, fence,
1064 true);
1065 dma_fence_put(fence);
1066 amdgpu_ctx_put(ctx);
1067 if (r)
1068 return r;
1069 }
1070 } 1068 }
1071 return 0; 1069 return 0;
1072} 1070}
@@ -1145,6 +1143,9 @@ static int amdgpu_cs_process_syncobj_out_dep(struct amdgpu_cs_parser *p,
1145 num_deps = chunk->length_dw * 4 / 1143 num_deps = chunk->length_dw * 4 /
1146 sizeof(struct drm_amdgpu_cs_chunk_sem); 1144 sizeof(struct drm_amdgpu_cs_chunk_sem);
1147 1145
1146 if (p->post_deps)
1147 return -EINVAL;
1148
1148 p->post_deps = kmalloc_array(num_deps, sizeof(*p->post_deps), 1149 p->post_deps = kmalloc_array(num_deps, sizeof(*p->post_deps),
1149 GFP_KERNEL); 1150 GFP_KERNEL);
1150 p->num_post_deps = 0; 1151 p->num_post_deps = 0;
@@ -1168,8 +1169,7 @@ static int amdgpu_cs_process_syncobj_out_dep(struct amdgpu_cs_parser *p,
1168 1169
1169 1170
1170static int amdgpu_cs_process_syncobj_timeline_out_dep(struct amdgpu_cs_parser *p, 1171static int amdgpu_cs_process_syncobj_timeline_out_dep(struct amdgpu_cs_parser *p,
1171 struct amdgpu_cs_chunk 1172 struct amdgpu_cs_chunk *chunk)
1172 *chunk)
1173{ 1173{
1174 struct drm_amdgpu_cs_chunk_syncobj *syncobj_deps; 1174 struct drm_amdgpu_cs_chunk_syncobj *syncobj_deps;
1175 unsigned num_deps; 1175 unsigned num_deps;
@@ -1179,6 +1179,9 @@ static int amdgpu_cs_process_syncobj_timeline_out_dep(struct amdgpu_cs_parser *p
1179 num_deps = chunk->length_dw * 4 / 1179 num_deps = chunk->length_dw * 4 /
1180 sizeof(struct drm_amdgpu_cs_chunk_syncobj); 1180 sizeof(struct drm_amdgpu_cs_chunk_syncobj);
1181 1181
1182 if (p->post_deps)
1183 return -EINVAL;
1184
1182 p->post_deps = kmalloc_array(num_deps, sizeof(*p->post_deps), 1185 p->post_deps = kmalloc_array(num_deps, sizeof(*p->post_deps),
1183 GFP_KERNEL); 1186 GFP_KERNEL);
1184 p->num_post_deps = 0; 1187 p->num_post_deps = 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
index f539a2a92774..7398b4850649 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
@@ -534,21 +534,24 @@ int amdgpu_ctx_wait_prev_fence(struct amdgpu_ctx *ctx,
534 struct drm_sched_entity *entity) 534 struct drm_sched_entity *entity)
535{ 535{
536 struct amdgpu_ctx_entity *centity = to_amdgpu_ctx_entity(entity); 536 struct amdgpu_ctx_entity *centity = to_amdgpu_ctx_entity(entity);
537 unsigned idx = centity->sequence & (amdgpu_sched_jobs - 1); 537 struct dma_fence *other;
538 struct dma_fence *other = centity->fences[idx]; 538 unsigned idx;
539 long r;
539 540
540 if (other) { 541 spin_lock(&ctx->ring_lock);
541 signed long r; 542 idx = centity->sequence & (amdgpu_sched_jobs - 1);
542 r = dma_fence_wait(other, true); 543 other = dma_fence_get(centity->fences[idx]);
543 if (r < 0) { 544 spin_unlock(&ctx->ring_lock);
544 if (r != -ERESTARTSYS)
545 DRM_ERROR("Error (%ld) waiting for fence!\n", r);
546 545
547 return r; 546 if (!other)
548 } 547 return 0;
549 }
550 548
551 return 0; 549 r = dma_fence_wait(other, true);
550 if (r < 0 && r != -ERESTARTSYS)
551 DRM_ERROR("Error (%ld) waiting for fence!\n", r);
552
553 dma_fence_put(other);
554 return r;
552} 555}
553 556
554void amdgpu_ctx_mgr_init(struct amdgpu_ctx_mgr *mgr) 557void amdgpu_ctx_mgr_init(struct amdgpu_ctx_mgr *mgr)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
index 6d54decef7f8..5652cc72ed3a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
@@ -707,7 +707,7 @@ static ssize_t amdgpu_debugfs_gpr_read(struct file *f, char __user *buf,
707 thread = (*pos & GENMASK_ULL(59, 52)) >> 52; 707 thread = (*pos & GENMASK_ULL(59, 52)) >> 52;
708 bank = (*pos & GENMASK_ULL(61, 60)) >> 60; 708 bank = (*pos & GENMASK_ULL(61, 60)) >> 60;
709 709
710 data = kmalloc_array(1024, sizeof(*data), GFP_KERNEL); 710 data = kcalloc(1024, sizeof(*data), GFP_KERNEL);
711 if (!data) 711 if (!data)
712 return -ENOMEM; 712 return -ENOMEM;
713 713
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
index f2e8b4238efd..5376328d3fd0 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
@@ -148,7 +148,7 @@ struct amdgpu_mgpu_info mgpu_info = {
148 .mutex = __MUTEX_INITIALIZER(mgpu_info.mutex), 148 .mutex = __MUTEX_INITIALIZER(mgpu_info.mutex),
149}; 149};
150int amdgpu_ras_enable = -1; 150int amdgpu_ras_enable = -1;
151uint amdgpu_ras_mask = 0xffffffff; 151uint amdgpu_ras_mask = 0xfffffffb;
152 152
153/** 153/**
154 * DOC: vramlimit (int) 154 * DOC: vramlimit (int)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gds.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gds.h
index df8a23554831..f6ac1e9548f2 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gds.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gds.h
@@ -32,7 +32,6 @@ struct amdgpu_gds {
32 uint32_t gws_size; 32 uint32_t gws_size;
33 uint32_t oa_size; 33 uint32_t oa_size;
34 uint32_t gds_compute_max_wave_id; 34 uint32_t gds_compute_max_wave_id;
35 uint32_t vgt_gs_max_wave_id;
36}; 35};
37 36
38struct amdgpu_gds_reg_offset { 37struct amdgpu_gds_reg_offset {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
index 8b7efd0a7028..2b546567853b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
@@ -159,12 +159,16 @@ static ssize_t amdgpu_get_dpm_state(struct device *dev,
159 struct amdgpu_device *adev = ddev->dev_private; 159 struct amdgpu_device *adev = ddev->dev_private;
160 enum amd_pm_state_type pm; 160 enum amd_pm_state_type pm;
161 161
162 if (is_support_sw_smu(adev) && adev->smu.ppt_funcs->get_current_power_state) 162 if (is_support_sw_smu(adev)) {
163 pm = amdgpu_smu_get_current_power_state(adev); 163 if (adev->smu.ppt_funcs->get_current_power_state)
164 else if (adev->powerplay.pp_funcs->get_current_power_state) 164 pm = amdgpu_smu_get_current_power_state(adev);
165 else
166 pm = adev->pm.dpm.user_state;
167 } else if (adev->powerplay.pp_funcs->get_current_power_state) {
165 pm = amdgpu_dpm_get_current_power_state(adev); 168 pm = amdgpu_dpm_get_current_power_state(adev);
166 else 169 } else {
167 pm = adev->pm.dpm.user_state; 170 pm = adev->pm.dpm.user_state;
171 }
168 172
169 return snprintf(buf, PAGE_SIZE, "%s\n", 173 return snprintf(buf, PAGE_SIZE, "%s\n",
170 (pm == POWER_STATE_TYPE_BATTERY) ? "battery" : 174 (pm == POWER_STATE_TYPE_BATTERY) ? "battery" :
@@ -191,7 +195,11 @@ static ssize_t amdgpu_set_dpm_state(struct device *dev,
191 goto fail; 195 goto fail;
192 } 196 }
193 197
194 if (adev->powerplay.pp_funcs->dispatch_tasks) { 198 if (is_support_sw_smu(adev)) {
199 mutex_lock(&adev->pm.mutex);
200 adev->pm.dpm.user_state = state;
201 mutex_unlock(&adev->pm.mutex);
202 } else if (adev->powerplay.pp_funcs->dispatch_tasks) {
195 amdgpu_dpm_dispatch_task(adev, AMD_PP_TASK_ENABLE_USER_STATE, &state); 203 amdgpu_dpm_dispatch_task(adev, AMD_PP_TASK_ENABLE_USER_STATE, &state);
196 } else { 204 } else {
197 mutex_lock(&adev->pm.mutex); 205 mutex_lock(&adev->pm.mutex);
@@ -1734,7 +1742,7 @@ static ssize_t amdgpu_hwmon_get_fan1_input(struct device *dev,
1734 return -EINVAL; 1742 return -EINVAL;
1735 1743
1736 if (is_support_sw_smu(adev)) { 1744 if (is_support_sw_smu(adev)) {
1737 err = smu_get_current_rpm(&adev->smu, &speed); 1745 err = smu_get_fan_speed_rpm(&adev->smu, &speed);
1738 if (err) 1746 if (err)
1739 return err; 1747 return err;
1740 } else if (adev->powerplay.pp_funcs->get_fan_speed_rpm) { 1748 } else if (adev->powerplay.pp_funcs->get_fan_speed_rpm) {
@@ -1794,7 +1802,7 @@ static ssize_t amdgpu_hwmon_get_fan1_target(struct device *dev,
1794 return -EINVAL; 1802 return -EINVAL;
1795 1803
1796 if (is_support_sw_smu(adev)) { 1804 if (is_support_sw_smu(adev)) {
1797 err = smu_get_current_rpm(&adev->smu, &rpm); 1805 err = smu_get_fan_speed_rpm(&adev->smu, &rpm);
1798 if (err) 1806 if (err)
1799 return err; 1807 return err;
1800 } else if (adev->powerplay.pp_funcs->get_fan_speed_rpm) { 1808 } else if (adev->powerplay.pp_funcs->get_fan_speed_rpm) {
@@ -3067,28 +3075,44 @@ static int amdgpu_debugfs_pm_info_pp(struct seq_file *m, struct amdgpu_device *a
3067 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_ENABLED_SMC_FEATURES_MASK, (void *)&value64, &size)) 3075 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_ENABLED_SMC_FEATURES_MASK, (void *)&value64, &size))
3068 seq_printf(m, "SMC Feature Mask: 0x%016llx\n", value64); 3076 seq_printf(m, "SMC Feature Mask: 0x%016llx\n", value64);
3069 3077
3070 /* UVD clocks */ 3078 if (adev->asic_type > CHIP_VEGA20) {
3071 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_POWER, (void *)&value, &size)) { 3079 /* VCN clocks */
3072 if (!value) { 3080 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VCN_POWER_STATE, (void *)&value, &size)) {
3073 seq_printf(m, "UVD: Disabled\n"); 3081 if (!value) {
3074 } else { 3082 seq_printf(m, "VCN: Disabled\n");
3075 seq_printf(m, "UVD: Enabled\n"); 3083 } else {
3076 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_DCLK, (void *)&value, &size)) 3084 seq_printf(m, "VCN: Enabled\n");
3077 seq_printf(m, "\t%u MHz (DCLK)\n", value/100); 3085 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_DCLK, (void *)&value, &size))
3078 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_VCLK, (void *)&value, &size)) 3086 seq_printf(m, "\t%u MHz (DCLK)\n", value/100);
3079 seq_printf(m, "\t%u MHz (VCLK)\n", value/100); 3087 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_VCLK, (void *)&value, &size))
3088 seq_printf(m, "\t%u MHz (VCLK)\n", value/100);
3089 }
3080 } 3090 }
3081 } 3091 seq_printf(m, "\n");
3082 seq_printf(m, "\n"); 3092 } else {
3093 /* UVD clocks */
3094 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_POWER, (void *)&value, &size)) {
3095 if (!value) {
3096 seq_printf(m, "UVD: Disabled\n");
3097 } else {
3098 seq_printf(m, "UVD: Enabled\n");
3099 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_DCLK, (void *)&value, &size))
3100 seq_printf(m, "\t%u MHz (DCLK)\n", value/100);
3101 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_VCLK, (void *)&value, &size))
3102 seq_printf(m, "\t%u MHz (VCLK)\n", value/100);
3103 }
3104 }
3105 seq_printf(m, "\n");
3083 3106
3084 /* VCE clocks */ 3107 /* VCE clocks */
3085 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VCE_POWER, (void *)&value, &size)) { 3108 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VCE_POWER, (void *)&value, &size)) {
3086 if (!value) { 3109 if (!value) {
3087 seq_printf(m, "VCE: Disabled\n"); 3110 seq_printf(m, "VCE: Disabled\n");
3088 } else { 3111 } else {
3089 seq_printf(m, "VCE: Enabled\n"); 3112 seq_printf(m, "VCE: Enabled\n");
3090 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VCE_ECCLK, (void *)&value, &size)) 3113 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VCE_ECCLK, (void *)&value, &size))
3091 seq_printf(m, "\t%u MHz (ECCLK)\n", value/100); 3114 seq_printf(m, "\t%u MHz (ECCLK)\n", value/100);
3115 }
3092 } 3116 }
3093 } 3117 }
3094 3118
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
index 1a4412e47810..fac7aa2c244f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
@@ -136,11 +136,6 @@ static int amdgpu_ras_reserve_vram(struct amdgpu_device *adev,
136static int amdgpu_ras_release_vram(struct amdgpu_device *adev, 136static int amdgpu_ras_release_vram(struct amdgpu_device *adev,
137 struct amdgpu_bo **bo_ptr); 137 struct amdgpu_bo **bo_ptr);
138 138
139static void amdgpu_ras_self_test(struct amdgpu_device *adev)
140{
141 /* TODO */
142}
143
144static ssize_t amdgpu_ras_debugfs_read(struct file *f, char __user *buf, 139static ssize_t amdgpu_ras_debugfs_read(struct file *f, char __user *buf,
145 size_t size, loff_t *pos) 140 size_t size, loff_t *pos)
146{ 141{
@@ -689,6 +684,12 @@ int amdgpu_ras_error_inject(struct amdgpu_device *adev,
689 if (!obj) 684 if (!obj)
690 return -EINVAL; 685 return -EINVAL;
691 686
687 if (block_info.block_id != TA_RAS_BLOCK__UMC) {
688 DRM_INFO("%s error injection is not supported yet\n",
689 ras_block_str(info->head.block));
690 return -EINVAL;
691 }
692
692 ret = psp_ras_trigger_error(&adev->psp, &block_info); 693 ret = psp_ras_trigger_error(&adev->psp, &block_info);
693 if (ret) 694 if (ret)
694 DRM_ERROR("RAS ERROR: inject %s error failed ret %d\n", 695 DRM_ERROR("RAS ERROR: inject %s error failed ret %d\n",
@@ -1557,6 +1558,12 @@ int amdgpu_ras_init(struct amdgpu_device *adev)
1557 1558
1558 amdgpu_ras_check_supported(adev, &con->hw_supported, 1559 amdgpu_ras_check_supported(adev, &con->hw_supported,
1559 &con->supported); 1560 &con->supported);
1561 if (!con->hw_supported) {
1562 amdgpu_ras_set_context(adev, NULL);
1563 kfree(con);
1564 return 0;
1565 }
1566
1560 con->features = 0; 1567 con->features = 0;
1561 INIT_LIST_HEAD(&con->head); 1568 INIT_LIST_HEAD(&con->head);
1562 /* Might need get this flag from vbios. */ 1569 /* Might need get this flag from vbios. */
@@ -1570,8 +1577,6 @@ int amdgpu_ras_init(struct amdgpu_device *adev)
1570 if (amdgpu_ras_fs_init(adev)) 1577 if (amdgpu_ras_fs_init(adev))
1571 goto fs_out; 1578 goto fs_out;
1572 1579
1573 amdgpu_ras_self_test(adev);
1574
1575 DRM_INFO("RAS INFO: ras initialized successfully, " 1580 DRM_INFO("RAS INFO: ras initialized successfully, "
1576 "hardware ability[%x] ras_mask[%x]\n", 1581 "hardware ability[%x] ras_mask[%x]\n",
1577 con->hw_supported, con->supported); 1582 con->hw_supported, con->supported);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
index 99f14fcc1460..19661c645703 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
@@ -30,6 +30,7 @@
30#define AMDGPU_VCN_FIRMWARE_OFFSET 256 30#define AMDGPU_VCN_FIRMWARE_OFFSET 256
31#define AMDGPU_VCN_MAX_ENC_RINGS 3 31#define AMDGPU_VCN_MAX_ENC_RINGS 3
32 32
33#define VCN_DEC_KMD_CMD 0x80000000
33#define VCN_DEC_CMD_FENCE 0x00000000 34#define VCN_DEC_CMD_FENCE 0x00000000
34#define VCN_DEC_CMD_TRAP 0x00000001 35#define VCN_DEC_CMD_TRAP 0x00000001
35#define VCN_DEC_CMD_WRITE_REG 0x00000004 36#define VCN_DEC_CMD_WRITE_REG 0x00000004
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
index 1675d5837c3c..f41287f9000d 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
@@ -1441,6 +1441,15 @@ static void gfx_v10_0_init_compute_vmid(struct amdgpu_device *adev)
1441 } 1441 }
1442 nv_grbm_select(adev, 0, 0, 0, 0); 1442 nv_grbm_select(adev, 0, 0, 0, 0);
1443 mutex_unlock(&adev->srbm_mutex); 1443 mutex_unlock(&adev->srbm_mutex);
1444
1445 /* Initialize all compute VMIDs to have no GDS, GWS, or OA
1446 acccess. These should be enabled by FW for target VMIDs. */
1447 for (i = FIRST_COMPUTE_VMID; i < LAST_COMPUTE_VMID; i++) {
1448 WREG32_SOC15_OFFSET(GC, 0, mmGDS_VMID0_BASE, 2 * i, 0);
1449 WREG32_SOC15_OFFSET(GC, 0, mmGDS_VMID0_SIZE, 2 * i, 0);
1450 WREG32_SOC15_OFFSET(GC, 0, mmGDS_GWS_VMID0, i, 0);
1451 WREG32_SOC15_OFFSET(GC, 0, mmGDS_OA_VMID0, i, 0);
1452 }
1444} 1453}
1445 1454
1446static void gfx_v10_0_tcp_harvest(struct amdgpu_device *adev) 1455static void gfx_v10_0_tcp_harvest(struct amdgpu_device *adev)
@@ -4197,15 +4206,6 @@ static void gfx_v10_0_ring_emit_ib_gfx(struct amdgpu_ring *ring,
4197 unsigned vmid = AMDGPU_JOB_GET_VMID(job); 4206 unsigned vmid = AMDGPU_JOB_GET_VMID(job);
4198 u32 header, control = 0; 4207 u32 header, control = 0;
4199 4208
4200 /* Prevent a hw deadlock due to a wave ID mismatch between ME and GDS.
4201 * This resets the wave ID counters. (needed by transform feedback)
4202 * TODO: This might only be needed on a VMID switch when we change
4203 * the GDS OA mapping, not sure.
4204 */
4205 amdgpu_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
4206 amdgpu_ring_write(ring, mmVGT_GS_MAX_WAVE_ID);
4207 amdgpu_ring_write(ring, ring->adev->gds.vgt_gs_max_wave_id);
4208
4209 if (ib->flags & AMDGPU_IB_FLAG_CE) 4209 if (ib->flags & AMDGPU_IB_FLAG_CE)
4210 header = PACKET3(PACKET3_INDIRECT_BUFFER_CNST, 2); 4210 header = PACKET3(PACKET3_INDIRECT_BUFFER_CNST, 2);
4211 else 4211 else
@@ -4611,6 +4611,7 @@ gfx_v10_0_set_gfx_eop_interrupt_state(struct amdgpu_device *adev,
4611 cp_int_cntl = REG_SET_FIELD(cp_int_cntl, CP_INT_CNTL_RING0, 4611 cp_int_cntl = REG_SET_FIELD(cp_int_cntl, CP_INT_CNTL_RING0,
4612 TIME_STAMP_INT_ENABLE, 0); 4612 TIME_STAMP_INT_ENABLE, 0);
4613 WREG32(cp_int_cntl_reg, cp_int_cntl); 4613 WREG32(cp_int_cntl_reg, cp_int_cntl);
4614 break;
4614 case AMDGPU_IRQ_STATE_ENABLE: 4615 case AMDGPU_IRQ_STATE_ENABLE:
4615 cp_int_cntl = RREG32(cp_int_cntl_reg); 4616 cp_int_cntl = RREG32(cp_int_cntl_reg);
4616 cp_int_cntl = REG_SET_FIELD(cp_int_cntl, CP_INT_CNTL_RING0, 4617 cp_int_cntl = REG_SET_FIELD(cp_int_cntl, CP_INT_CNTL_RING0,
@@ -4951,7 +4952,7 @@ static const struct amdgpu_ring_funcs gfx_v10_0_ring_funcs_gfx = {
4951 5 + /* HDP_INVL */ 4952 5 + /* HDP_INVL */
4952 8 + 8 + /* FENCE x2 */ 4953 8 + 8 + /* FENCE x2 */
4953 2, /* SWITCH_BUFFER */ 4954 2, /* SWITCH_BUFFER */
4954 .emit_ib_size = 7, /* gfx_v10_0_ring_emit_ib_gfx */ 4955 .emit_ib_size = 4, /* gfx_v10_0_ring_emit_ib_gfx */
4955 .emit_ib = gfx_v10_0_ring_emit_ib_gfx, 4956 .emit_ib = gfx_v10_0_ring_emit_ib_gfx,
4956 .emit_fence = gfx_v10_0_ring_emit_fence, 4957 .emit_fence = gfx_v10_0_ring_emit_fence,
4957 .emit_pipeline_sync = gfx_v10_0_ring_emit_pipeline_sync, 4958 .emit_pipeline_sync = gfx_v10_0_ring_emit_pipeline_sync,
@@ -5102,7 +5103,6 @@ static void gfx_v10_0_set_gds_init(struct amdgpu_device *adev)
5102 default: 5103 default:
5103 adev->gds.gds_size = 0x10000; 5104 adev->gds.gds_size = 0x10000;
5104 adev->gds.gds_compute_max_wave_id = 0x4ff; 5105 adev->gds.gds_compute_max_wave_id = 0x4ff;
5105 adev->gds.vgt_gs_max_wave_id = 0x3ff;
5106 break; 5106 break;
5107 } 5107 }
5108 5108
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
index 0db9f488da7e..21187275dfd3 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
@@ -1879,6 +1879,15 @@ static void gfx_v7_0_init_compute_vmid(struct amdgpu_device *adev)
1879 } 1879 }
1880 cik_srbm_select(adev, 0, 0, 0, 0); 1880 cik_srbm_select(adev, 0, 0, 0, 0);
1881 mutex_unlock(&adev->srbm_mutex); 1881 mutex_unlock(&adev->srbm_mutex);
1882
1883 /* Initialize all compute VMIDs to have no GDS, GWS, or OA
1884 acccess. These should be enabled by FW for target VMIDs. */
1885 for (i = FIRST_COMPUTE_VMID; i < LAST_COMPUTE_VMID; i++) {
1886 WREG32(amdgpu_gds_reg_offset[i].mem_base, 0);
1887 WREG32(amdgpu_gds_reg_offset[i].mem_size, 0);
1888 WREG32(amdgpu_gds_reg_offset[i].gws, 0);
1889 WREG32(amdgpu_gds_reg_offset[i].oa, 0);
1890 }
1882} 1891}
1883 1892
1884static void gfx_v7_0_config_init(struct amdgpu_device *adev) 1893static void gfx_v7_0_config_init(struct amdgpu_device *adev)
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
index 5f401b41ef7c..ee1ccdcf2d30 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
@@ -1321,6 +1321,39 @@ static int gfx_v8_0_rlc_init(struct amdgpu_device *adev)
1321 return 0; 1321 return 0;
1322} 1322}
1323 1323
1324static int gfx_v8_0_csb_vram_pin(struct amdgpu_device *adev)
1325{
1326 int r;
1327
1328 r = amdgpu_bo_reserve(adev->gfx.rlc.clear_state_obj, false);
1329 if (unlikely(r != 0))
1330 return r;
1331
1332 r = amdgpu_bo_pin(adev->gfx.rlc.clear_state_obj,
1333 AMDGPU_GEM_DOMAIN_VRAM);
1334 if (!r)
1335 adev->gfx.rlc.clear_state_gpu_addr =
1336 amdgpu_bo_gpu_offset(adev->gfx.rlc.clear_state_obj);
1337
1338 amdgpu_bo_unreserve(adev->gfx.rlc.clear_state_obj);
1339
1340 return r;
1341}
1342
1343static void gfx_v8_0_csb_vram_unpin(struct amdgpu_device *adev)
1344{
1345 int r;
1346
1347 if (!adev->gfx.rlc.clear_state_obj)
1348 return;
1349
1350 r = amdgpu_bo_reserve(adev->gfx.rlc.clear_state_obj, true);
1351 if (likely(r == 0)) {
1352 amdgpu_bo_unpin(adev->gfx.rlc.clear_state_obj);
1353 amdgpu_bo_unreserve(adev->gfx.rlc.clear_state_obj);
1354 }
1355}
1356
1324static void gfx_v8_0_mec_fini(struct amdgpu_device *adev) 1357static void gfx_v8_0_mec_fini(struct amdgpu_device *adev)
1325{ 1358{
1326 amdgpu_bo_free_kernel(&adev->gfx.mec.hpd_eop_obj, NULL, NULL); 1359 amdgpu_bo_free_kernel(&adev->gfx.mec.hpd_eop_obj, NULL, NULL);
@@ -3706,6 +3739,15 @@ static void gfx_v8_0_init_compute_vmid(struct amdgpu_device *adev)
3706 } 3739 }
3707 vi_srbm_select(adev, 0, 0, 0, 0); 3740 vi_srbm_select(adev, 0, 0, 0, 0);
3708 mutex_unlock(&adev->srbm_mutex); 3741 mutex_unlock(&adev->srbm_mutex);
3742
3743 /* Initialize all compute VMIDs to have no GDS, GWS, or OA
3744 acccess. These should be enabled by FW for target VMIDs. */
3745 for (i = FIRST_COMPUTE_VMID; i < LAST_COMPUTE_VMID; i++) {
3746 WREG32(amdgpu_gds_reg_offset[i].mem_base, 0);
3747 WREG32(amdgpu_gds_reg_offset[i].mem_size, 0);
3748 WREG32(amdgpu_gds_reg_offset[i].gws, 0);
3749 WREG32(amdgpu_gds_reg_offset[i].oa, 0);
3750 }
3709} 3751}
3710 3752
3711static void gfx_v8_0_config_init(struct amdgpu_device *adev) 3753static void gfx_v8_0_config_init(struct amdgpu_device *adev)
@@ -4776,6 +4818,10 @@ static int gfx_v8_0_hw_init(void *handle)
4776 gfx_v8_0_init_golden_registers(adev); 4818 gfx_v8_0_init_golden_registers(adev);
4777 gfx_v8_0_constants_init(adev); 4819 gfx_v8_0_constants_init(adev);
4778 4820
4821 r = gfx_v8_0_csb_vram_pin(adev);
4822 if (r)
4823 return r;
4824
4779 r = adev->gfx.rlc.funcs->resume(adev); 4825 r = adev->gfx.rlc.funcs->resume(adev);
4780 if (r) 4826 if (r)
4781 return r; 4827 return r;
@@ -4892,6 +4938,9 @@ static int gfx_v8_0_hw_fini(void *handle)
4892 else 4938 else
4893 pr_err("rlc is busy, skip halt rlc\n"); 4939 pr_err("rlc is busy, skip halt rlc\n");
4894 amdgpu_gfx_rlc_exit_safe_mode(adev); 4940 amdgpu_gfx_rlc_exit_safe_mode(adev);
4941
4942 gfx_v8_0_csb_vram_unpin(adev);
4943
4895 return 0; 4944 return 0;
4896} 4945}
4897 4946
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
index f4c4eea62526..c066e1d3f981 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
@@ -596,14 +596,18 @@ static void gfx_v9_0_check_if_need_gfxoff(struct amdgpu_device *adev)
596 case CHIP_VEGA20: 596 case CHIP_VEGA20:
597 break; 597 break;
598 case CHIP_RAVEN: 598 case CHIP_RAVEN:
599 if (adev->rev_id >= 0x8 || adev->pdev->device == 0x15d8) 599 if (!(adev->rev_id >= 0x8 || adev->pdev->device == 0x15d8)
600 break; 600 &&((adev->gfx.rlc_fw_version != 106 &&
601 if ((adev->gfx.rlc_fw_version != 106 && 601 adev->gfx.rlc_fw_version < 531) ||
602 adev->gfx.rlc_fw_version < 531) || 602 (adev->gfx.rlc_fw_version == 53815) ||
603 (adev->gfx.rlc_fw_version == 53815) || 603 (adev->gfx.rlc_feature_version < 1) ||
604 (adev->gfx.rlc_feature_version < 1) || 604 !adev->gfx.rlc.is_rlc_v2_1))
605 !adev->gfx.rlc.is_rlc_v2_1)
606 adev->pm.pp_feature &= ~PP_GFXOFF_MASK; 605 adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
606
607 if (adev->pm.pp_feature & PP_GFXOFF_MASK)
608 adev->pg_flags |= AMD_PG_SUPPORT_GFX_PG |
609 AMD_PG_SUPPORT_CP |
610 AMD_PG_SUPPORT_RLC_SMU_HS;
607 break; 611 break;
608 default: 612 default:
609 break; 613 break;
@@ -1918,6 +1922,15 @@ static void gfx_v9_0_init_compute_vmid(struct amdgpu_device *adev)
1918 } 1922 }
1919 soc15_grbm_select(adev, 0, 0, 0, 0); 1923 soc15_grbm_select(adev, 0, 0, 0, 0);
1920 mutex_unlock(&adev->srbm_mutex); 1924 mutex_unlock(&adev->srbm_mutex);
1925
1926 /* Initialize all compute VMIDs to have no GDS, GWS, or OA
1927 acccess. These should be enabled by FW for target VMIDs. */
1928 for (i = FIRST_COMPUTE_VMID; i < LAST_COMPUTE_VMID; i++) {
1929 WREG32_SOC15_OFFSET(GC, 0, mmGDS_VMID0_BASE, 2 * i, 0);
1930 WREG32_SOC15_OFFSET(GC, 0, mmGDS_VMID0_SIZE, 2 * i, 0);
1931 WREG32_SOC15_OFFSET(GC, 0, mmGDS_GWS_VMID0, i, 0);
1932 WREG32_SOC15_OFFSET(GC, 0, mmGDS_OA_VMID0, i, 0);
1933 }
1921} 1934}
1922 1935
1923static void gfx_v9_0_constants_init(struct amdgpu_device *adev) 1936static void gfx_v9_0_constants_init(struct amdgpu_device *adev)
@@ -4860,7 +4873,7 @@ static void gfx_v9_0_ring_soft_recovery(struct amdgpu_ring *ring, unsigned vmid)
4860 value = REG_SET_FIELD(value, SQ_CMD, MODE, 0x01); 4873 value = REG_SET_FIELD(value, SQ_CMD, MODE, 0x01);
4861 value = REG_SET_FIELD(value, SQ_CMD, CHECK_VMID, 1); 4874 value = REG_SET_FIELD(value, SQ_CMD, CHECK_VMID, 1);
4862 value = REG_SET_FIELD(value, SQ_CMD, VM_ID, vmid); 4875 value = REG_SET_FIELD(value, SQ_CMD, VM_ID, vmid);
4863 WREG32(mmSQ_CMD, value); 4876 WREG32_SOC15(GC, 0, mmSQ_CMD, value);
4864} 4877}
4865 4878
4866static void gfx_v9_0_set_gfx_eop_interrupt_state(struct amdgpu_device *adev, 4879static void gfx_v9_0_set_gfx_eop_interrupt_state(struct amdgpu_device *adev,
diff --git a/drivers/gpu/drm/amd/amdgpu/nv.c b/drivers/gpu/drm/amd/amdgpu/nv.c
index 662612f89c70..9922bce3fd89 100644
--- a/drivers/gpu/drm/amd/amdgpu/nv.c
+++ b/drivers/gpu/drm/amd/amdgpu/nv.c
@@ -552,7 +552,6 @@ static int nv_common_early_init(void *handle)
552 AMD_CG_SUPPORT_BIF_LS; 552 AMD_CG_SUPPORT_BIF_LS;
553 adev->pg_flags = AMD_PG_SUPPORT_VCN | 553 adev->pg_flags = AMD_PG_SUPPORT_VCN |
554 AMD_PG_SUPPORT_VCN_DPG | 554 AMD_PG_SUPPORT_VCN_DPG |
555 AMD_PG_SUPPORT_MMHUB |
556 AMD_PG_SUPPORT_ATHUB; 555 AMD_PG_SUPPORT_ATHUB;
557 adev->external_rev_id = adev->rev_id + 0x1; 556 adev->external_rev_id = adev->rev_id + 0x1;
558 break; 557 break;
diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c
index 23265414d448..04fbf05d7176 100644
--- a/drivers/gpu/drm/amd/amdgpu/soc15.c
+++ b/drivers/gpu/drm/amd/amdgpu/soc15.c
@@ -992,11 +992,6 @@ static int soc15_common_early_init(void *handle)
992 992
993 adev->pg_flags = AMD_PG_SUPPORT_SDMA | AMD_PG_SUPPORT_VCN; 993 adev->pg_flags = AMD_PG_SUPPORT_SDMA | AMD_PG_SUPPORT_VCN;
994 } 994 }
995
996 if (adev->pm.pp_feature & PP_GFXOFF_MASK)
997 adev->pg_flags |= AMD_PG_SUPPORT_GFX_PG |
998 AMD_PG_SUPPORT_CP |
999 AMD_PG_SUPPORT_RLC_SMU_HS;
1000 break; 995 break;
1001 default: 996 default:
1002 /* FIXME: not supported yet */ 997 /* FIXME: not supported yet */
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c
index 988c0adaca91..dfde886cc6bd 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c
@@ -372,11 +372,8 @@ static void vcn_v2_0_mc_resume(struct amdgpu_device *adev)
372 WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH, 372 WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
373 upper_32_bits(adev->vcn.gpu_addr)); 373 upper_32_bits(adev->vcn.gpu_addr));
374 offset = size; 374 offset = size;
375 /* No signed header for now from firmware
376 WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_OFFSET0, 375 WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_OFFSET0,
377 AMDGPU_UVD_FIRMWARE_OFFSET >> 3); 376 AMDGPU_UVD_FIRMWARE_OFFSET >> 3);
378 */
379 WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_OFFSET0, 0);
380 } 377 }
381 378
382 WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_SIZE0, size); 379 WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_SIZE0, size);
@@ -1488,7 +1485,7 @@ static void vcn_v2_0_dec_ring_insert_start(struct amdgpu_ring *ring)
1488 amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0_INTERNAL_OFFSET, 0)); 1485 amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0_INTERNAL_OFFSET, 0));
1489 amdgpu_ring_write(ring, 0); 1486 amdgpu_ring_write(ring, 0);
1490 amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD_INTERNAL_OFFSET, 0)); 1487 amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD_INTERNAL_OFFSET, 0));
1491 amdgpu_ring_write(ring, VCN_DEC_CMD_PACKET_START << 1); 1488 amdgpu_ring_write(ring, VCN_DEC_KMD_CMD | (VCN_DEC_CMD_PACKET_START << 1));
1492} 1489}
1493 1490
1494/** 1491/**
@@ -1501,7 +1498,7 @@ static void vcn_v2_0_dec_ring_insert_start(struct amdgpu_ring *ring)
1501static void vcn_v2_0_dec_ring_insert_end(struct amdgpu_ring *ring) 1498static void vcn_v2_0_dec_ring_insert_end(struct amdgpu_ring *ring)
1502{ 1499{
1503 amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD_INTERNAL_OFFSET, 0)); 1500 amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD_INTERNAL_OFFSET, 0));
1504 amdgpu_ring_write(ring, VCN_DEC_CMD_PACKET_END << 1); 1501 amdgpu_ring_write(ring, VCN_DEC_KMD_CMD | (VCN_DEC_CMD_PACKET_END << 1));
1505} 1502}
1506 1503
1507/** 1504/**
@@ -1546,7 +1543,7 @@ static void vcn_v2_0_dec_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64
1546 amdgpu_ring_write(ring, upper_32_bits(addr) & 0xff); 1543 amdgpu_ring_write(ring, upper_32_bits(addr) & 0xff);
1547 1544
1548 amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD_INTERNAL_OFFSET, 0)); 1545 amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD_INTERNAL_OFFSET, 0));
1549 amdgpu_ring_write(ring, VCN_DEC_CMD_FENCE << 1); 1546 amdgpu_ring_write(ring, VCN_DEC_KMD_CMD | (VCN_DEC_CMD_FENCE << 1));
1550 1547
1551 amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0_INTERNAL_OFFSET, 0)); 1548 amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0_INTERNAL_OFFSET, 0));
1552 amdgpu_ring_write(ring, 0); 1549 amdgpu_ring_write(ring, 0);
@@ -1556,7 +1553,7 @@ static void vcn_v2_0_dec_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64
1556 1553
1557 amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD_INTERNAL_OFFSET, 0)); 1554 amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD_INTERNAL_OFFSET, 0));
1558 1555
1559 amdgpu_ring_write(ring, VCN_DEC_CMD_TRAP << 1); 1556 amdgpu_ring_write(ring, VCN_DEC_KMD_CMD | (VCN_DEC_CMD_TRAP << 1));
1560} 1557}
1561 1558
1562/** 1559/**
@@ -1600,7 +1597,7 @@ static void vcn_v2_0_dec_ring_emit_reg_wait(struct amdgpu_ring *ring,
1600 1597
1601 amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD_INTERNAL_OFFSET, 0)); 1598 amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD_INTERNAL_OFFSET, 0));
1602 1599
1603 amdgpu_ring_write(ring, VCN_DEC_CMD_REG_READ_COND_WAIT << 1); 1600 amdgpu_ring_write(ring, VCN_DEC_KMD_CMD | (VCN_DEC_CMD_REG_READ_COND_WAIT << 1));
1604} 1601}
1605 1602
1606static void vcn_v2_0_dec_ring_emit_vm_flush(struct amdgpu_ring *ring, 1603static void vcn_v2_0_dec_ring_emit_vm_flush(struct amdgpu_ring *ring,
@@ -1629,7 +1626,7 @@ static void vcn_v2_0_dec_ring_emit_wreg(struct amdgpu_ring *ring,
1629 1626
1630 amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD_INTERNAL_OFFSET, 0)); 1627 amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD_INTERNAL_OFFSET, 0));
1631 1628
1632 amdgpu_ring_write(ring, VCN_DEC_CMD_WRITE_REG << 1); 1629 amdgpu_ring_write(ring, VCN_DEC_KMD_CMD | (VCN_DEC_CMD_WRITE_REG << 1));
1633} 1630}
1634 1631
1635/** 1632/**
@@ -2082,6 +2079,36 @@ static int vcn_v2_0_process_interrupt(struct amdgpu_device *adev,
2082 return 0; 2079 return 0;
2083} 2080}
2084 2081
2082static int vcn_v2_0_dec_ring_test_ring(struct amdgpu_ring *ring)
2083{
2084 struct amdgpu_device *adev = ring->adev;
2085 uint32_t tmp = 0;
2086 unsigned i;
2087 int r;
2088
2089 WREG32(adev->vcn.external.scratch9, 0xCAFEDEAD);
2090 r = amdgpu_ring_alloc(ring, 4);
2091 if (r)
2092 return r;
2093 amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD_INTERNAL_OFFSET, 0));
2094 amdgpu_ring_write(ring, VCN_DEC_KMD_CMD | (VCN_DEC_CMD_PACKET_START << 1));
2095 amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.scratch9, 0));
2096 amdgpu_ring_write(ring, 0xDEADBEEF);
2097 amdgpu_ring_commit(ring);
2098 for (i = 0; i < adev->usec_timeout; i++) {
2099 tmp = RREG32(adev->vcn.external.scratch9);
2100 if (tmp == 0xDEADBEEF)
2101 break;
2102 DRM_UDELAY(1);
2103 }
2104
2105 if (i >= adev->usec_timeout)
2106 r = -ETIMEDOUT;
2107
2108 return r;
2109}
2110
2111
2085static int vcn_v2_0_set_powergating_state(void *handle, 2112static int vcn_v2_0_set_powergating_state(void *handle,
2086 enum amd_powergating_state state) 2113 enum amd_powergating_state state)
2087{ 2114{
@@ -2145,7 +2172,7 @@ static const struct amdgpu_ring_funcs vcn_v2_0_dec_ring_vm_funcs = {
2145 .emit_ib = vcn_v2_0_dec_ring_emit_ib, 2172 .emit_ib = vcn_v2_0_dec_ring_emit_ib,
2146 .emit_fence = vcn_v2_0_dec_ring_emit_fence, 2173 .emit_fence = vcn_v2_0_dec_ring_emit_fence,
2147 .emit_vm_flush = vcn_v2_0_dec_ring_emit_vm_flush, 2174 .emit_vm_flush = vcn_v2_0_dec_ring_emit_vm_flush,
2148 .test_ring = amdgpu_vcn_dec_ring_test_ring, 2175 .test_ring = vcn_v2_0_dec_ring_test_ring,
2149 .test_ib = amdgpu_vcn_dec_ring_test_ib, 2176 .test_ib = amdgpu_vcn_dec_ring_test_ib,
2150 .insert_nop = vcn_v2_0_dec_ring_insert_nop, 2177 .insert_nop = vcn_v2_0_dec_ring_insert_nop,
2151 .insert_start = vcn_v2_0_dec_ring_insert_start, 2178 .insert_start = vcn_v2_0_dec_ring_insert_start,
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
index 26b15cc56c31..1d3cd5c50d5f 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
@@ -1567,32 +1567,6 @@ copy_from_user_failed:
1567 return err; 1567 return err;
1568} 1568}
1569 1569
1570static int kfd_ioctl_alloc_queue_gws(struct file *filep,
1571 struct kfd_process *p, void *data)
1572{
1573 int retval;
1574 struct kfd_ioctl_alloc_queue_gws_args *args = data;
1575 struct kfd_dev *dev;
1576
1577 if (!hws_gws_support)
1578 return -ENODEV;
1579
1580 dev = kfd_device_by_id(args->gpu_id);
1581 if (!dev) {
1582 pr_debug("Could not find gpu id 0x%x\n", args->gpu_id);
1583 return -ENODEV;
1584 }
1585 if (dev->dqm->sched_policy == KFD_SCHED_POLICY_NO_HWS)
1586 return -ENODEV;
1587
1588 mutex_lock(&p->mutex);
1589 retval = pqm_set_gws(&p->pqm, args->queue_id, args->num_gws ? dev->gws : NULL);
1590 mutex_unlock(&p->mutex);
1591
1592 args->first_gws = 0;
1593 return retval;
1594}
1595
1596static int kfd_ioctl_get_dmabuf_info(struct file *filep, 1570static int kfd_ioctl_get_dmabuf_info(struct file *filep,
1597 struct kfd_process *p, void *data) 1571 struct kfd_process *p, void *data)
1598{ 1572{
@@ -1795,8 +1769,6 @@ static const struct amdkfd_ioctl_desc amdkfd_ioctls[] = {
1795 AMDKFD_IOCTL_DEF(AMDKFD_IOC_IMPORT_DMABUF, 1769 AMDKFD_IOCTL_DEF(AMDKFD_IOC_IMPORT_DMABUF,
1796 kfd_ioctl_import_dmabuf, 0), 1770 kfd_ioctl_import_dmabuf, 0),
1797 1771
1798 AMDKFD_IOCTL_DEF(AMDKFD_IOC_ALLOC_QUEUE_GWS,
1799 kfd_ioctl_alloc_queue_gws, 0),
1800}; 1772};
1801 1773
1802#define AMDKFD_CORE_IOCTL_COUNT ARRAY_SIZE(amdkfd_ioctls) 1774#define AMDKFD_CORE_IOCTL_COUNT ARRAY_SIZE(amdkfd_ioctls)
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_crat.c b/drivers/gpu/drm/amd/amdkfd/kfd_crat.c
index 792371442195..4e3fc284f6ac 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_crat.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_crat.c
@@ -668,6 +668,7 @@ static int kfd_fill_gpu_cache_info(struct kfd_dev *kdev,
668 case CHIP_RAVEN: 668 case CHIP_RAVEN:
669 pcache_info = raven_cache_info; 669 pcache_info = raven_cache_info;
670 num_of_cache_types = ARRAY_SIZE(raven_cache_info); 670 num_of_cache_types = ARRAY_SIZE(raven_cache_info);
671 break;
671 case CHIP_NAVI10: 672 case CHIP_NAVI10:
672 pcache_info = navi10_cache_info; 673 pcache_info = navi10_cache_info;
673 num_of_cache_types = ARRAY_SIZE(navi10_cache_info); 674 num_of_cache_types = ARRAY_SIZE(navi10_cache_info);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c
index 4f8a6ffc5775..9cd3eb2d90bd 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c
@@ -429,7 +429,6 @@ struct mqd_manager *mqd_manager_init_v10(enum KFD_MQD_TYPE type,
429 429
430 switch (type) { 430 switch (type) {
431 case KFD_MQD_TYPE_CP: 431 case KFD_MQD_TYPE_CP:
432 pr_debug("%s@%i\n", __func__, __LINE__);
433 case KFD_MQD_TYPE_COMPUTE: 432 case KFD_MQD_TYPE_COMPUTE:
434 pr_debug("%s@%i\n", __func__, __LINE__); 433 pr_debug("%s@%i\n", __func__, __LINE__);
435 mqd->allocate_mqd = allocate_mqd; 434 mqd->allocate_mqd = allocate_mqd;
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index 4a29f72334d0..45be7a2132bb 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -3131,13 +3131,25 @@ static enum dc_color_depth
3131convert_color_depth_from_display_info(const struct drm_connector *connector, 3131convert_color_depth_from_display_info(const struct drm_connector *connector,
3132 const struct drm_connector_state *state) 3132 const struct drm_connector_state *state)
3133{ 3133{
3134 uint32_t bpc = connector->display_info.bpc; 3134 uint8_t bpc = (uint8_t)connector->display_info.bpc;
3135
3136 /* Assume 8 bpc by default if no bpc is specified. */
3137 bpc = bpc ? bpc : 8;
3135 3138
3136 if (!state) 3139 if (!state)
3137 state = connector->state; 3140 state = connector->state;
3138 3141
3139 if (state) { 3142 if (state) {
3140 bpc = state->max_bpc; 3143 /*
3144 * Cap display bpc based on the user requested value.
3145 *
3146 * The value for state->max_bpc may not correctly updated
3147 * depending on when the connector gets added to the state
3148 * or if this was called outside of atomic check, so it
3149 * can't be used directly.
3150 */
3151 bpc = min(bpc, state->max_requested_bpc);
3152
3141 /* Round down to the nearest even number. */ 3153 /* Round down to the nearest even number. */
3142 bpc = bpc - (bpc & 1); 3154 bpc = bpc - (bpc & 1);
3143 } 3155 }
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dce110/dce110_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dce110/dce110_clk_mgr.c
index c1a92c16535c..5cc3acccda2a 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dce110/dce110_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dce110/dce110_clk_mgr.c
@@ -262,12 +262,12 @@ void dce110_clk_mgr_construct(
262 struct dc_context *ctx, 262 struct dc_context *ctx,
263 struct clk_mgr_internal *clk_mgr) 263 struct clk_mgr_internal *clk_mgr)
264{ 264{
265 dce_clk_mgr_construct(ctx, clk_mgr);
266
265 memcpy(clk_mgr->max_clks_by_state, 267 memcpy(clk_mgr->max_clks_by_state,
266 dce110_max_clks_by_state, 268 dce110_max_clks_by_state,
267 sizeof(dce110_max_clks_by_state)); 269 sizeof(dce110_max_clks_by_state));
268 270
269 dce_clk_mgr_construct(ctx, clk_mgr);
270
271 clk_mgr->regs = &disp_clk_regs; 271 clk_mgr->regs = &disp_clk_regs;
272 clk_mgr->clk_mgr_shift = &disp_clk_shift; 272 clk_mgr->clk_mgr_shift = &disp_clk_shift;
273 clk_mgr->clk_mgr_mask = &disp_clk_mask; 273 clk_mgr->clk_mgr_mask = &disp_clk_mask;
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dce112/dce112_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dce112/dce112_clk_mgr.c
index 778392c73187..7c746ef1e32e 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dce112/dce112_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dce112/dce112_clk_mgr.c
@@ -226,12 +226,12 @@ void dce112_clk_mgr_construct(
226 struct dc_context *ctx, 226 struct dc_context *ctx,
227 struct clk_mgr_internal *clk_mgr) 227 struct clk_mgr_internal *clk_mgr)
228{ 228{
229 dce_clk_mgr_construct(ctx, clk_mgr);
230
229 memcpy(clk_mgr->max_clks_by_state, 231 memcpy(clk_mgr->max_clks_by_state,
230 dce112_max_clks_by_state, 232 dce112_max_clks_by_state,
231 sizeof(dce112_max_clks_by_state)); 233 sizeof(dce112_max_clks_by_state));
232 234
233 dce_clk_mgr_construct(ctx, clk_mgr);
234
235 clk_mgr->regs = &disp_clk_regs; 235 clk_mgr->regs = &disp_clk_regs;
236 clk_mgr->clk_mgr_shift = &disp_clk_shift; 236 clk_mgr->clk_mgr_shift = &disp_clk_shift;
237 clk_mgr->clk_mgr_mask = &disp_clk_mask; 237 clk_mgr->clk_mgr_mask = &disp_clk_mask;
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dce120/dce120_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dce120/dce120_clk_mgr.c
index 906310c3e2eb..5399b8cf6b75 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dce120/dce120_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dce120/dce120_clk_mgr.c
@@ -127,12 +127,12 @@ static struct clk_mgr_funcs dce120_funcs = {
127 127
128void dce120_clk_mgr_construct(struct dc_context *ctx, struct clk_mgr_internal *clk_mgr) 128void dce120_clk_mgr_construct(struct dc_context *ctx, struct clk_mgr_internal *clk_mgr)
129{ 129{
130 dce_clk_mgr_construct(ctx, clk_mgr);
131
130 memcpy(clk_mgr->max_clks_by_state, 132 memcpy(clk_mgr->max_clks_by_state,
131 dce120_max_clks_by_state, 133 dce120_max_clks_by_state,
132 sizeof(dce120_max_clks_by_state)); 134 sizeof(dce120_max_clks_by_state));
133 135
134 dce_clk_mgr_construct(ctx, clk_mgr);
135
136 clk_mgr->base.dprefclk_khz = 600000; 136 clk_mgr->base.dprefclk_khz = 600000;
137 clk_mgr->base.funcs = &dce120_funcs; 137 clk_mgr->base.funcs = &dce120_funcs;
138} 138}
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c
index 08a774fc7b67..50bfb5921de0 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c
@@ -301,6 +301,8 @@ void dcn2_update_clocks_fpga(struct clk_mgr *clk_mgr,
301void dcn2_init_clocks(struct clk_mgr *clk_mgr) 301void dcn2_init_clocks(struct clk_mgr *clk_mgr)
302{ 302{
303 memset(&(clk_mgr->clks), 0, sizeof(struct dc_clocks)); 303 memset(&(clk_mgr->clks), 0, sizeof(struct dc_clocks));
304 // Assumption is that boot state always supports pstate
305 clk_mgr->clks.p_state_change_support = true;
304} 306}
305 307
306void dcn2_enable_pme_wa(struct clk_mgr *clk_mgr_base) 308void dcn2_enable_pme_wa(struct clk_mgr *clk_mgr_base)
@@ -331,6 +333,7 @@ void dcn20_clk_mgr_construct(
331 struct dccg *dccg) 333 struct dccg *dccg)
332{ 334{
333 clk_mgr->base.ctx = ctx; 335 clk_mgr->base.ctx = ctx;
336 clk_mgr->pp_smu = pp_smu;
334 clk_mgr->base.funcs = &dcn2_funcs; 337 clk_mgr->base.funcs = &dcn2_funcs;
335 clk_mgr->regs = &clk_mgr_regs; 338 clk_mgr->regs = &clk_mgr_regs;
336 clk_mgr->clk_mgr_shift = &clk_mgr_shift; 339 clk_mgr->clk_mgr_shift = &clk_mgr_shift;
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c
index 4ef4dc63e221..cbc480a33376 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
@@ -23,6 +23,7 @@
23 */ 23 */
24 24
25#include <linux/slab.h> 25#include <linux/slab.h>
26#include <linux/mm.h>
26 27
27#include "dm_services.h" 28#include "dm_services.h"
28 29
@@ -502,8 +503,10 @@ void dc_stream_set_static_screen_events(struct dc *dc,
502 503
503static void destruct(struct dc *dc) 504static void destruct(struct dc *dc)
504{ 505{
505 dc_release_state(dc->current_state); 506 if (dc->current_state) {
506 dc->current_state = NULL; 507 dc_release_state(dc->current_state);
508 dc->current_state = NULL;
509 }
507 510
508 destroy_links(dc); 511 destroy_links(dc);
509 512
@@ -1169,8 +1172,8 @@ bool dc_post_update_surfaces_to_stream(struct dc *dc)
1169 1172
1170struct dc_state *dc_create_state(struct dc *dc) 1173struct dc_state *dc_create_state(struct dc *dc)
1171{ 1174{
1172 struct dc_state *context = kzalloc(sizeof(struct dc_state), 1175 struct dc_state *context = kvzalloc(sizeof(struct dc_state),
1173 GFP_KERNEL); 1176 GFP_KERNEL);
1174 1177
1175 if (!context) 1178 if (!context)
1176 return NULL; 1179 return NULL;
@@ -1190,11 +1193,11 @@ struct dc_state *dc_create_state(struct dc *dc)
1190struct dc_state *dc_copy_state(struct dc_state *src_ctx) 1193struct dc_state *dc_copy_state(struct dc_state *src_ctx)
1191{ 1194{
1192 int i, j; 1195 int i, j;
1193 struct dc_state *new_ctx = kmemdup(src_ctx, 1196 struct dc_state *new_ctx = kvmalloc(sizeof(struct dc_state), GFP_KERNEL);
1194 sizeof(struct dc_state), GFP_KERNEL);
1195 1197
1196 if (!new_ctx) 1198 if (!new_ctx)
1197 return NULL; 1199 return NULL;
1200 memcpy(new_ctx, src_ctx, sizeof(struct dc_state));
1198 1201
1199 for (i = 0; i < MAX_PIPES; i++) { 1202 for (i = 0; i < MAX_PIPES; i++) {
1200 struct pipe_ctx *cur_pipe = &new_ctx->res_ctx.pipe_ctx[i]; 1203 struct pipe_ctx *cur_pipe = &new_ctx->res_ctx.pipe_ctx[i];
@@ -1228,7 +1231,7 @@ static void dc_state_free(struct kref *kref)
1228{ 1231{
1229 struct dc_state *context = container_of(kref, struct dc_state, refcount); 1232 struct dc_state *context = container_of(kref, struct dc_state, refcount);
1230 dc_resource_state_destruct(context); 1233 dc_resource_state_destruct(context);
1231 kfree(context); 1234 kvfree(context);
1232} 1235}
1233 1236
1234void dc_release_state(struct dc_state *context) 1237void dc_release_state(struct dc_state *context)
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
index 8dbf759eba45..355b4ba12796 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
@@ -532,6 +532,7 @@ static void read_edp_current_link_settings_on_detect(struct dc_link *link)
532 uint32_t read_dpcd_retry_cnt = 10; 532 uint32_t read_dpcd_retry_cnt = 10;
533 enum dc_status status = DC_ERROR_UNEXPECTED; 533 enum dc_status status = DC_ERROR_UNEXPECTED;
534 int i; 534 int i;
535 union max_down_spread max_down_spread = { {0} };
535 536
536 // Read DPCD 00101h to find out the number of lanes currently set 537 // Read DPCD 00101h to find out the number of lanes currently set
537 for (i = 0; i < read_dpcd_retry_cnt; i++) { 538 for (i = 0; i < read_dpcd_retry_cnt; i++) {
@@ -553,8 +554,6 @@ static void read_edp_current_link_settings_on_detect(struct dc_link *link)
553 msleep(8); 554 msleep(8);
554 } 555 }
555 556
556 ASSERT(status == DC_OK);
557
558 // Read DPCD 00100h to find if standard link rates are set 557 // Read DPCD 00100h to find if standard link rates are set
559 core_link_read_dpcd(link, DP_LINK_BW_SET, 558 core_link_read_dpcd(link, DP_LINK_BW_SET,
560 &link_bw_set, sizeof(link_bw_set)); 559 &link_bw_set, sizeof(link_bw_set));
@@ -576,6 +575,12 @@ static void read_edp_current_link_settings_on_detect(struct dc_link *link)
576 link->cur_link_settings.link_rate = link_bw_set; 575 link->cur_link_settings.link_rate = link_bw_set;
577 link->cur_link_settings.use_link_rate_set = false; 576 link->cur_link_settings.use_link_rate_set = false;
578 } 577 }
578 // Read DPCD 00003h to find the max down spread.
579 core_link_read_dpcd(link, DP_MAX_DOWNSPREAD,
580 &max_down_spread.raw, sizeof(max_down_spread));
581 link->cur_link_settings.link_spread =
582 max_down_spread.bits.MAX_DOWN_SPREAD ?
583 LINK_SPREAD_05_DOWNSPREAD_30KHZ : LINK_SPREAD_DISABLED;
579} 584}
580 585
581static bool detect_dp( 586static bool detect_dp(
@@ -717,13 +722,6 @@ bool dc_link_detect(struct dc_link *link, enum dc_detect_reason reason)
717 return false; 722 return false;
718 } 723 }
719 724
720 if (link->connector_signal == SIGNAL_TYPE_EDP) {
721 /* On detect, we want to make sure current link settings are
722 * up to date, especially if link was powered on by GOP.
723 */
724 read_edp_current_link_settings_on_detect(link);
725 }
726
727 prev_sink = link->local_sink; 725 prev_sink = link->local_sink;
728 if (prev_sink != NULL) { 726 if (prev_sink != NULL) {
729 dc_sink_retain(prev_sink); 727 dc_sink_retain(prev_sink);
@@ -765,6 +763,7 @@ bool dc_link_detect(struct dc_link *link, enum dc_detect_reason reason)
765 } 763 }
766 764
767 case SIGNAL_TYPE_EDP: { 765 case SIGNAL_TYPE_EDP: {
766 read_edp_current_link_settings_on_detect(link);
768 detect_edp_sink_caps(link); 767 detect_edp_sink_caps(link);
769 sink_caps.transaction_type = 768 sink_caps.transaction_type =
770 DDC_TRANSACTION_TYPE_I2C_OVER_AUX; 769 DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
@@ -2329,7 +2328,7 @@ bool dc_link_set_backlight_level(const struct dc_link *link,
2329 if (core_dc->current_state->res_ctx.pipe_ctx[i].stream) { 2328 if (core_dc->current_state->res_ctx.pipe_ctx[i].stream) {
2330 if (core_dc->current_state->res_ctx. 2329 if (core_dc->current_state->res_ctx.
2331 pipe_ctx[i].stream->link 2330 pipe_ctx[i].stream->link
2332 == link) 2331 == link) {
2333 /* DMCU -1 for all controller id values, 2332 /* DMCU -1 for all controller id values,
2334 * therefore +1 here 2333 * therefore +1 here
2335 */ 2334 */
@@ -2337,6 +2336,13 @@ bool dc_link_set_backlight_level(const struct dc_link *link,
2337 core_dc->current_state-> 2336 core_dc->current_state->
2338 res_ctx.pipe_ctx[i].stream_res.tg->inst + 2337 res_ctx.pipe_ctx[i].stream_res.tg->inst +
2339 1; 2338 1;
2339
2340 /* Disable brightness ramping when the display is blanked
2341 * as it can hang the DMCU
2342 */
2343 if (core_dc->current_state->res_ctx.pipe_ctx[i].plane_state == NULL)
2344 frame_ramp = 0;
2345 }
2340 } 2346 }
2341 } 2347 }
2342 abm->funcs->set_backlight_level_pwm( 2348 abm->funcs->set_backlight_level_pwm(
@@ -2984,8 +2990,10 @@ void dc_link_set_preferred_link_settings(struct dc *dc,
2984 2990
2985 /* Retrain with preferred link settings only relevant for 2991 /* Retrain with preferred link settings only relevant for
2986 * DP signal type 2992 * DP signal type
2993 * Check for non-DP signal or if passive dongle present
2987 */ 2994 */
2988 if (!dc_is_dp_signal(link->connector_signal)) 2995 if (!dc_is_dp_signal(link->connector_signal) ||
2996 link->dongle_max_pix_clk > 0)
2989 return; 2997 return;
2990 2998
2991 for (i = 0; i < MAX_PIPES; i++) { 2999 for (i = 0; i < MAX_PIPES; i++) {
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
index 056be4c34a98..2c7aaed907b9 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
@@ -2230,18 +2230,25 @@ static void get_active_converter_info(
2230 link->dpcd_caps.dongle_type = DISPLAY_DONGLE_NONE; 2230 link->dpcd_caps.dongle_type = DISPLAY_DONGLE_NONE;
2231 ddc_service_set_dongle_type(link->ddc, 2231 ddc_service_set_dongle_type(link->ddc,
2232 link->dpcd_caps.dongle_type); 2232 link->dpcd_caps.dongle_type);
2233 link->dpcd_caps.is_branch_dev = false;
2233 return; 2234 return;
2234 } 2235 }
2235 2236
2236 /* DPCD 0x5 bit 0 = 1, it indicate it's branch device */ 2237 /* DPCD 0x5 bit 0 = 1, it indicate it's branch device */
2237 link->dpcd_caps.is_branch_dev = ds_port.fields.PORT_PRESENT; 2238 if (ds_port.fields.PORT_TYPE == DOWNSTREAM_DP) {
2239 link->dpcd_caps.is_branch_dev = false;
2240 }
2241
2242 else {
2243 link->dpcd_caps.is_branch_dev = ds_port.fields.PORT_PRESENT;
2244 }
2238 2245
2239 switch (ds_port.fields.PORT_TYPE) { 2246 switch (ds_port.fields.PORT_TYPE) {
2240 case DOWNSTREAM_VGA: 2247 case DOWNSTREAM_VGA:
2241 link->dpcd_caps.dongle_type = DISPLAY_DONGLE_DP_VGA_CONVERTER; 2248 link->dpcd_caps.dongle_type = DISPLAY_DONGLE_DP_VGA_CONVERTER;
2242 break; 2249 break;
2243 case DOWNSTREAM_DVI_HDMI: 2250 case DOWNSTREAM_DVI_HDMI_DP_PLUS_PLUS:
2244 /* At this point we don't know is it DVI or HDMI, 2251 /* At this point we don't know is it DVI or HDMI or DP++,
2245 * assume DVI.*/ 2252 * assume DVI.*/
2246 link->dpcd_caps.dongle_type = DISPLAY_DONGLE_DP_DVI_CONVERTER; 2253 link->dpcd_caps.dongle_type = DISPLAY_DONGLE_DP_DVI_CONVERTER;
2247 break; 2254 break;
@@ -2258,6 +2265,10 @@ static void get_active_converter_info(
2258 det_caps, sizeof(det_caps)); 2265 det_caps, sizeof(det_caps));
2259 2266
2260 switch (port_caps->bits.DWN_STRM_PORTX_TYPE) { 2267 switch (port_caps->bits.DWN_STRM_PORTX_TYPE) {
2268 /*Handle DP case as DONGLE_NONE*/
2269 case DOWN_STREAM_DETAILED_DP:
2270 link->dpcd_caps.dongle_type = DISPLAY_DONGLE_NONE;
2271 break;
2261 case DOWN_STREAM_DETAILED_VGA: 2272 case DOWN_STREAM_DETAILED_VGA:
2262 link->dpcd_caps.dongle_type = 2273 link->dpcd_caps.dongle_type =
2263 DISPLAY_DONGLE_DP_VGA_CONVERTER; 2274 DISPLAY_DONGLE_DP_VGA_CONVERTER;
@@ -2267,6 +2278,8 @@ static void get_active_converter_info(
2267 DISPLAY_DONGLE_DP_DVI_CONVERTER; 2278 DISPLAY_DONGLE_DP_DVI_CONVERTER;
2268 break; 2279 break;
2269 case DOWN_STREAM_DETAILED_HDMI: 2280 case DOWN_STREAM_DETAILED_HDMI:
2281 case DOWN_STREAM_DETAILED_DP_PLUS_PLUS:
2282 /*Handle DP++ active converter case, process DP++ case as HDMI case according DP1.4 spec*/
2270 link->dpcd_caps.dongle_type = 2283 link->dpcd_caps.dongle_type =
2271 DISPLAY_DONGLE_DP_HDMI_CONVERTER; 2284 DISPLAY_DONGLE_DP_HDMI_CONVERTER;
2272 2285
@@ -2282,14 +2295,18 @@ static void get_active_converter_info(
2282 2295
2283 link->dpcd_caps.dongle_caps.is_dp_hdmi_s3d_converter = 2296 link->dpcd_caps.dongle_caps.is_dp_hdmi_s3d_converter =
2284 hdmi_caps.bits.FRAME_SEQ_TO_FRAME_PACK; 2297 hdmi_caps.bits.FRAME_SEQ_TO_FRAME_PACK;
2285 link->dpcd_caps.dongle_caps.is_dp_hdmi_ycbcr422_pass_through = 2298 /*YCBCR capability only for HDMI case*/
2286 hdmi_caps.bits.YCrCr422_PASS_THROUGH; 2299 if (port_caps->bits.DWN_STRM_PORTX_TYPE
2287 link->dpcd_caps.dongle_caps.is_dp_hdmi_ycbcr420_pass_through = 2300 == DOWN_STREAM_DETAILED_HDMI) {
2288 hdmi_caps.bits.YCrCr420_PASS_THROUGH; 2301 link->dpcd_caps.dongle_caps.is_dp_hdmi_ycbcr422_pass_through =
2289 link->dpcd_caps.dongle_caps.is_dp_hdmi_ycbcr422_converter = 2302 hdmi_caps.bits.YCrCr422_PASS_THROUGH;
2290 hdmi_caps.bits.YCrCr422_CONVERSION; 2303 link->dpcd_caps.dongle_caps.is_dp_hdmi_ycbcr420_pass_through =
2291 link->dpcd_caps.dongle_caps.is_dp_hdmi_ycbcr420_converter = 2304 hdmi_caps.bits.YCrCr420_PASS_THROUGH;
2292 hdmi_caps.bits.YCrCr420_CONVERSION; 2305 link->dpcd_caps.dongle_caps.is_dp_hdmi_ycbcr422_converter =
2306 hdmi_caps.bits.YCrCr422_CONVERSION;
2307 link->dpcd_caps.dongle_caps.is_dp_hdmi_ycbcr420_converter =
2308 hdmi_caps.bits.YCrCr420_CONVERSION;
2309 }
2293 2310
2294 link->dpcd_caps.dongle_caps.dp_hdmi_max_bpc = 2311 link->dpcd_caps.dongle_caps.dp_hdmi_max_bpc =
2295 translate_dpcd_max_bpc( 2312 translate_dpcd_max_bpc(
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
index 51a78283a86d..2ceaab4fb5de 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
@@ -258,7 +258,7 @@ bool resource_construct(
258 * PORT_CONNECTIVITY == 1 (as instructed by HW team). 258 * PORT_CONNECTIVITY == 1 (as instructed by HW team).
259 */ 259 */
260 update_num_audio(&straps, &num_audio, &pool->audio_support); 260 update_num_audio(&straps, &num_audio, &pool->audio_support);
261 for (i = 0; i < pool->pipe_count && i < num_audio; i++) { 261 for (i = 0; i < caps->num_audio; i++) {
262 struct audio *aud = create_funcs->create_audio(ctx, i); 262 struct audio *aud = create_funcs->create_audio(ctx, i);
263 263
264 if (aud == NULL) { 264 if (aud == NULL) {
@@ -1669,6 +1669,12 @@ static struct audio *find_first_free_audio(
1669 return pool->audios[i]; 1669 return pool->audios[i];
1670 } 1670 }
1671 } 1671 }
1672
1673 /* use engine id to find free audio */
1674 if ((id < pool->audio_count) && (res_ctx->is_audio_acquired[id] == false)) {
1675 return pool->audios[id];
1676 }
1677
1672 /*not found the matching one, first come first serve*/ 1678 /*not found the matching one, first come first serve*/
1673 for (i = 0; i < pool->audio_count; i++) { 1679 for (i = 0; i < pool->audio_count; i++) {
1674 if (res_ctx->is_audio_acquired[i] == false) { 1680 if (res_ctx->is_audio_acquired[i] == false) {
@@ -1833,6 +1839,7 @@ static int get_norm_pix_clk(const struct dc_crtc_timing *timing)
1833 pix_clk /= 2; 1839 pix_clk /= 2;
1834 if (timing->pixel_encoding != PIXEL_ENCODING_YCBCR422) { 1840 if (timing->pixel_encoding != PIXEL_ENCODING_YCBCR422) {
1835 switch (timing->display_color_depth) { 1841 switch (timing->display_color_depth) {
1842 case COLOR_DEPTH_666:
1836 case COLOR_DEPTH_888: 1843 case COLOR_DEPTH_888:
1837 normalized_pix_clk = pix_clk; 1844 normalized_pix_clk = pix_clk;
1838 break; 1845 break;
@@ -1979,7 +1986,7 @@ enum dc_status resource_map_pool_resources(
1979 /* TODO: Add check if ASIC support and EDID audio */ 1986 /* TODO: Add check if ASIC support and EDID audio */
1980 if (!stream->converter_disable_audio && 1987 if (!stream->converter_disable_audio &&
1981 dc_is_audio_capable_signal(pipe_ctx->stream->signal) && 1988 dc_is_audio_capable_signal(pipe_ctx->stream->signal) &&
1982 stream->audio_info.mode_count) { 1989 stream->audio_info.mode_count && stream->audio_info.flags.all) {
1983 pipe_ctx->stream_res.audio = find_first_free_audio( 1990 pipe_ctx->stream_res.audio = find_first_free_audio(
1984 &context->res_ctx, pool, pipe_ctx->stream_res.stream_enc->id); 1991 &context->res_ctx, pool, pipe_ctx->stream_res.stream_enc->id);
1985 1992
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
index af7f8be230f7..352862370390 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
@@ -612,7 +612,8 @@ bool dc_stream_set_dynamic_metadata(struct dc *dc,
612 612
613 pipe_ctx->stream->dmdata_address = attr->address; 613 pipe_ctx->stream->dmdata_address = attr->address;
614 614
615 if (pipe_ctx->stream_res.stream_enc->funcs->set_dynamic_metadata != NULL) { 615 if (pipe_ctx->stream_res.stream_enc &&
616 pipe_ctx->stream_res.stream_enc->funcs->set_dynamic_metadata != NULL) {
616 if (pipe_ctx->stream->dmdata_address.quad_part != 0) { 617 if (pipe_ctx->stream->dmdata_address.quad_part != 0) {
617 /* if using dynamic meta, don't set up generic infopackets */ 618 /* if using dynamic meta, don't set up generic infopackets */
618 pipe_ctx->stream_res.encoder_info_frame.hdrsmd.valid = false; 619 pipe_ctx->stream_res.encoder_info_frame.hdrsmd.valid = false;
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_abm.c b/drivers/gpu/drm/amd/display/dc/dce/dce_abm.c
index f8903bcabe49..58bd131d5b48 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_abm.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_abm.c
@@ -239,6 +239,10 @@ static void dmcu_set_backlight_level(
239 s2 |= (backlight_8_bit << ATOM_S2_CURRENT_BL_LEVEL_SHIFT); 239 s2 |= (backlight_8_bit << ATOM_S2_CURRENT_BL_LEVEL_SHIFT);
240 240
241 REG_WRITE(BIOS_SCRATCH_2, s2); 241 REG_WRITE(BIOS_SCRATCH_2, s2);
242
243 /* waitDMCUReadyForCmd */
244 REG_WAIT(MASTER_COMM_CNTL_REG, MASTER_COMM_INTERRUPT,
245 0, 1, 80000);
242} 246}
243 247
244static void dce_abm_init(struct abm *abm) 248static void dce_abm_init(struct abm *abm)
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
index 858a58856ebd..fafb4b470140 100644
--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
@@ -965,11 +965,17 @@ void hwss_edp_backlight_control(
965void dce110_enable_audio_stream(struct pipe_ctx *pipe_ctx) 965void dce110_enable_audio_stream(struct pipe_ctx *pipe_ctx)
966{ 966{
967 /* notify audio driver for audio modes of monitor */ 967 /* notify audio driver for audio modes of monitor */
968 struct dc *core_dc = pipe_ctx->stream->ctx->dc; 968 struct dc *core_dc;
969 struct pp_smu_funcs *pp_smu = NULL; 969 struct pp_smu_funcs *pp_smu = NULL;
970 struct clk_mgr *clk_mgr = core_dc->clk_mgr; 970 struct clk_mgr *clk_mgr;
971 unsigned int i, num_audio = 1; 971 unsigned int i, num_audio = 1;
972 972
973 if (!pipe_ctx->stream)
974 return;
975
976 core_dc = pipe_ctx->stream->ctx->dc;
977 clk_mgr = core_dc->clk_mgr;
978
973 if (pipe_ctx->stream_res.audio && pipe_ctx->stream_res.audio->enabled == true) 979 if (pipe_ctx->stream_res.audio && pipe_ctx->stream_res.audio->enabled == true)
974 return; 980 return;
975 981
@@ -999,9 +1005,15 @@ void dce110_enable_audio_stream(struct pipe_ctx *pipe_ctx)
999 1005
1000void dce110_disable_audio_stream(struct pipe_ctx *pipe_ctx, int option) 1006void dce110_disable_audio_stream(struct pipe_ctx *pipe_ctx, int option)
1001{ 1007{
1002 struct dc *dc = pipe_ctx->stream->ctx->dc; 1008 struct dc *dc;
1003 struct pp_smu_funcs *pp_smu = NULL; 1009 struct pp_smu_funcs *pp_smu = NULL;
1004 struct clk_mgr *clk_mgr = dc->clk_mgr; 1010 struct clk_mgr *clk_mgr;
1011
1012 if (!pipe_ctx || !pipe_ctx->stream)
1013 return;
1014
1015 dc = pipe_ctx->stream->ctx->dc;
1016 clk_mgr = dc->clk_mgr;
1005 1017
1006 if (pipe_ctx->stream_res.audio && pipe_ctx->stream_res.audio->enabled == false) 1018 if (pipe_ctx->stream_res.audio && pipe_ctx->stream_res.audio->enabled == false)
1007 return; 1019 return;
@@ -1009,6 +1021,8 @@ void dce110_disable_audio_stream(struct pipe_ctx *pipe_ctx, int option)
1009 pipe_ctx->stream_res.stream_enc->funcs->audio_mute_control( 1021 pipe_ctx->stream_res.stream_enc->funcs->audio_mute_control(
1010 pipe_ctx->stream_res.stream_enc, true); 1022 pipe_ctx->stream_res.stream_enc, true);
1011 if (pipe_ctx->stream_res.audio) { 1023 if (pipe_ctx->stream_res.audio) {
1024 pipe_ctx->stream_res.audio->enabled = false;
1025
1012 if (dc->res_pool->pp_smu) 1026 if (dc->res_pool->pp_smu)
1013 pp_smu = dc->res_pool->pp_smu; 1027 pp_smu = dc->res_pool->pp_smu;
1014 1028
@@ -1039,8 +1053,6 @@ void dce110_disable_audio_stream(struct pipe_ctx *pipe_ctx, int option)
1039 /* dal_audio_disable_azalia_audio_jack_presence(stream->audio, 1053 /* dal_audio_disable_azalia_audio_jack_presence(stream->audio,
1040 * stream->stream_engine_id); 1054 * stream->stream_engine_id);
1041 */ 1055 */
1042 if (pipe_ctx->stream_res.audio)
1043 pipe_ctx->stream_res.audio->enabled = false;
1044 } 1056 }
1045} 1057}
1046 1058
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
index e50a696fcb5d..2118ea21d7e9 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
@@ -1195,16 +1195,7 @@ static void dcn10_init_hw(struct dc *dc)
1195 * everything down. 1195 * everything down.
1196 */ 1196 */
1197 if (dcb->funcs->is_accelerated_mode(dcb) || dc->config.power_down_display_on_boot) { 1197 if (dcb->funcs->is_accelerated_mode(dcb) || dc->config.power_down_display_on_boot) {
1198 for (i = 0; i < dc->res_pool->pipe_count; i++) { 1198 dc->hwss.init_pipes(dc, dc->current_state);
1199 struct hubp *hubp = dc->res_pool->hubps[i];
1200 struct dpp *dpp = dc->res_pool->dpps[i];
1201
1202 hubp->funcs->hubp_init(hubp);
1203 dc->res_pool->opps[i]->mpc_tree_params.opp_id = dc->res_pool->opps[i]->inst;
1204 plane_atomic_power_down(dc, dpp, hubp);
1205 }
1206
1207 apply_DEGVIDCN10_253_wa(dc);
1208 } 1199 }
1209 1200
1210 for (i = 0; i < dc->res_pool->audio_count; i++) { 1201 for (i = 0; i < dc->res_pool->audio_count; i++) {
@@ -1375,10 +1366,6 @@ static bool dcn10_set_input_transfer_func(struct pipe_ctx *pipe_ctx,
1375 return result; 1366 return result;
1376} 1367}
1377 1368
1378
1379
1380
1381
1382static bool 1369static bool
1383dcn10_set_output_transfer_func(struct pipe_ctx *pipe_ctx, 1370dcn10_set_output_transfer_func(struct pipe_ctx *pipe_ctx,
1384 const struct dc_stream_state *stream) 1371 const struct dc_stream_state *stream)
@@ -2516,6 +2503,12 @@ static void dcn10_apply_ctx_for_surface(
2516 if (removed_pipe[i]) 2503 if (removed_pipe[i])
2517 dcn10_disable_plane(dc, &dc->current_state->res_ctx.pipe_ctx[i]); 2504 dcn10_disable_plane(dc, &dc->current_state->res_ctx.pipe_ctx[i]);
2518 2505
2506 for (i = 0; i < dc->res_pool->pipe_count; i++)
2507 if (removed_pipe[i]) {
2508 dc->hwss.optimize_bandwidth(dc, context);
2509 break;
2510 }
2511
2519 if (dc->hwseq->wa.DEGVIDCN10_254) 2512 if (dc->hwseq->wa.DEGVIDCN10_254)
2520 hubbub1_wm_change_req_wa(dc->res_pool->hubbub); 2513 hubbub1_wm_change_req_wa(dc->res_pool->hubbub);
2521} 2514}
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
index 1a20461c2937..a12530a3ab9c 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
@@ -508,7 +508,7 @@ static const struct resource_caps rv2_res_cap = {
508 .num_audio = 3, 508 .num_audio = 3,
509 .num_stream_encoder = 3, 509 .num_stream_encoder = 3,
510 .num_pll = 3, 510 .num_pll = 3,
511 .num_ddc = 3, 511 .num_ddc = 4,
512}; 512};
513 513
514static const struct dc_plane_cap plane_cap = { 514static const struct dc_plane_cap plane_cap = {
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dccg.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dccg.c
index 51a3dfe97f0e..31aa6ee5cd5b 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dccg.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dccg.c
@@ -102,14 +102,19 @@ void dccg2_init(struct dccg *dccg)
102 switch (dccg_dcn->base.ctx->dc->res_pool->pipe_count) { 102 switch (dccg_dcn->base.ctx->dc->res_pool->pipe_count) {
103 case 6: 103 case 6:
104 REG_UPDATE(DPPCLK_DTO_CTRL, DPPCLK_DTO_DB_EN[5], 1); 104 REG_UPDATE(DPPCLK_DTO_CTRL, DPPCLK_DTO_DB_EN[5], 1);
105 /* Fall through */
105 case 5: 106 case 5:
106 REG_UPDATE(DPPCLK_DTO_CTRL, DPPCLK_DTO_DB_EN[4], 1); 107 REG_UPDATE(DPPCLK_DTO_CTRL, DPPCLK_DTO_DB_EN[4], 1);
108 /* Fall through */
107 case 4: 109 case 4:
108 REG_UPDATE(DPPCLK_DTO_CTRL, DPPCLK_DTO_DB_EN[3], 1); 110 REG_UPDATE(DPPCLK_DTO_CTRL, DPPCLK_DTO_DB_EN[3], 1);
111 /* Fall through */
109 case 3: 112 case 3:
110 REG_UPDATE(DPPCLK_DTO_CTRL, DPPCLK_DTO_DB_EN[2], 1); 113 REG_UPDATE(DPPCLK_DTO_CTRL, DPPCLK_DTO_DB_EN[2], 1);
114 /* Fall through */
111 case 2: 115 case 2:
112 REG_UPDATE(DPPCLK_DTO_CTRL, DPPCLK_DTO_DB_EN[1], 1); 116 REG_UPDATE(DPPCLK_DTO_CTRL, DPPCLK_DTO_DB_EN[1], 1);
117 /* Fall through */
113 case 1: 118 case 1:
114 REG_UPDATE(DPPCLK_DTO_CTRL, DPPCLK_DTO_DB_EN[0], 1); 119 REG_UPDATE(DPPCLK_DTO_CTRL, DPPCLK_DTO_DB_EN[0], 1);
115 break; 120 break;
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubbub.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubbub.c
index ece6e136437b..6e2dbd03f9bf 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubbub.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubbub.c
@@ -337,6 +337,7 @@ static enum dcn_hubbub_page_table_block_size page_table_block_size_to_hw(unsigne
337 break; 337 break;
338 default: 338 default:
339 ASSERT(false); 339 ASSERT(false);
340 block_size = page_table_block_size;
340 break; 341 break;
341 } 342 }
342 343
@@ -366,25 +367,24 @@ int hubbub2_init_dchub_sys_ctx(struct hubbub *hubbub,
366 struct dcn_vmid_page_table_config phys_config; 367 struct dcn_vmid_page_table_config phys_config;
367 368
368 REG_SET(DCN_VM_FB_LOCATION_BASE, 0, 369 REG_SET(DCN_VM_FB_LOCATION_BASE, 0,
369 FB_BASE, pa_config->system_aperture.fb_base); 370 FB_BASE, pa_config->system_aperture.fb_base >> 24);
370 REG_SET(DCN_VM_FB_LOCATION_TOP, 0, 371 REG_SET(DCN_VM_FB_LOCATION_TOP, 0,
371 FB_TOP, pa_config->system_aperture.fb_top); 372 FB_TOP, pa_config->system_aperture.fb_top >> 24);
372 REG_SET(DCN_VM_FB_OFFSET, 0, 373 REG_SET(DCN_VM_FB_OFFSET, 0,
373 FB_OFFSET, pa_config->system_aperture.fb_offset); 374 FB_OFFSET, pa_config->system_aperture.fb_offset >> 24);
374 REG_SET(DCN_VM_AGP_BOT, 0, 375 REG_SET(DCN_VM_AGP_BOT, 0,
375 AGP_BOT, pa_config->system_aperture.agp_bot); 376 AGP_BOT, pa_config->system_aperture.agp_bot >> 24);
376 REG_SET(DCN_VM_AGP_TOP, 0, 377 REG_SET(DCN_VM_AGP_TOP, 0,
377 AGP_TOP, pa_config->system_aperture.agp_top); 378 AGP_TOP, pa_config->system_aperture.agp_top >> 24);
378 REG_SET(DCN_VM_AGP_BASE, 0, 379 REG_SET(DCN_VM_AGP_BASE, 0,
379 AGP_BASE, pa_config->system_aperture.agp_base); 380 AGP_BASE, pa_config->system_aperture.agp_base >> 24);
380 381
381 if (pa_config->gart_config.page_table_start_addr != pa_config->gart_config.page_table_end_addr) { 382 if (pa_config->gart_config.page_table_start_addr != pa_config->gart_config.page_table_end_addr) {
382 phys_config.depth = 1;
383 phys_config.block_size = 4096;
384 phys_config.page_table_start_addr = pa_config->gart_config.page_table_start_addr >> 12; 383 phys_config.page_table_start_addr = pa_config->gart_config.page_table_start_addr >> 12;
385 phys_config.page_table_end_addr = pa_config->gart_config.page_table_end_addr >> 12; 384 phys_config.page_table_end_addr = pa_config->gart_config.page_table_end_addr >> 12;
386 phys_config.page_table_base_addr = pa_config->gart_config.page_table_base_addr; 385 phys_config.page_table_base_addr = pa_config->gart_config.page_table_base_addr;
387 386 phys_config.depth = 0;
387 phys_config.block_size = 0;
388 // Init VMID 0 based on PA config 388 // Init VMID 0 based on PA config
389 dcn20_vmid_setup(&hubbub1->vmid[0], &phys_config); 389 dcn20_vmid_setup(&hubbub1->vmid[0], &phys_config);
390 } 390 }
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
index 0b84a322b8a2..d810c8940129 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
@@ -1153,8 +1153,8 @@ void dcn20_enable_plane(
1153 1153
1154 apt.sys_default.quad_part = 0; 1154 apt.sys_default.quad_part = 0;
1155 1155
1156 apt.sys_high.quad_part = dc->vm_pa_config.system_aperture.start_addr; 1156 apt.sys_low.quad_part = dc->vm_pa_config.system_aperture.start_addr;
1157 apt.sys_low.quad_part = dc->vm_pa_config.system_aperture.end_addr; 1157 apt.sys_high.quad_part = dc->vm_pa_config.system_aperture.end_addr;
1158 1158
1159 // Program system aperture settings 1159 // Program system aperture settings
1160 pipe_ctx->plane_res.hubp->funcs->hubp_set_vm_system_aperture_settings(pipe_ctx->plane_res.hubp, &apt); 1160 pipe_ctx->plane_res.hubp->funcs->hubp_set_vm_system_aperture_settings(pipe_ctx->plane_res.hubp, &apt);
@@ -1242,6 +1242,8 @@ void dcn20_pipe_control_lock_global(
1242 CRTC_STATE_VACTIVE); 1242 CRTC_STATE_VACTIVE);
1243 pipe->stream_res.tg->funcs->wait_for_state(pipe->stream_res.tg, 1243 pipe->stream_res.tg->funcs->wait_for_state(pipe->stream_res.tg,
1244 CRTC_STATE_VBLANK); 1244 CRTC_STATE_VBLANK);
1245 pipe->stream_res.tg->funcs->wait_for_state(pipe->stream_res.tg,
1246 CRTC_STATE_VACTIVE);
1245 pipe->stream_res.tg->funcs->lock_doublebuffer_disable( 1247 pipe->stream_res.tg->funcs->lock_doublebuffer_disable(
1246 pipe->stream_res.tg); 1248 pipe->stream_res.tg);
1247 } 1249 }
@@ -1263,6 +1265,17 @@ void dcn20_pipe_control_lock(
1263 if (pipe->plane_state != NULL) 1265 if (pipe->plane_state != NULL)
1264 flip_immediate = pipe->plane_state->flip_immediate; 1266 flip_immediate = pipe->plane_state->flip_immediate;
1265 1267
1268 if (flip_immediate && lock) {
1269 while (pipe->plane_res.hubp->funcs->hubp_is_flip_pending(pipe->plane_res.hubp)) {
1270 udelay(1);
1271 }
1272
1273 if (pipe->bottom_pipe != NULL)
1274 while (pipe->bottom_pipe->plane_res.hubp->funcs->hubp_is_flip_pending(pipe->bottom_pipe->plane_res.hubp)) {
1275 udelay(1);
1276 }
1277 }
1278
1266 /* In flip immediate and pipe splitting case, we need to use GSL 1279 /* In flip immediate and pipe splitting case, we need to use GSL
1267 * for synchronization. Only do setup on locking and on flip type change. 1280 * for synchronization. Only do setup on locking and on flip type change.
1268 */ 1281 */
@@ -1740,8 +1753,11 @@ static void dcn20_reset_back_end_for_pipe(
1740 else if (pipe_ctx->stream_res.audio) { 1753 else if (pipe_ctx->stream_res.audio) {
1741 dc->hwss.disable_audio_stream(pipe_ctx, FREE_ACQUIRED_RESOURCE); 1754 dc->hwss.disable_audio_stream(pipe_ctx, FREE_ACQUIRED_RESOURCE);
1742 } 1755 }
1743
1744 } 1756 }
1757#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
1758 else if (pipe_ctx->stream_res.dsc)
1759 dp_set_dsc_enable(pipe_ctx, false);
1760#endif
1745 1761
1746 /* by upper caller loop, parent pipe: pipe0, will be reset last. 1762 /* by upper caller loop, parent pipe: pipe0, will be reset last.
1747 * back end share by all pipes and will be disable only when disable 1763 * back end share by all pipes and will be disable only when disable
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.c
index 26a66ccf6e72..1ae973962d53 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.c
@@ -535,7 +535,7 @@ void dcn20_timing_generator_init(struct optc *optc1)
535 optc1->min_h_blank = 32; 535 optc1->min_h_blank = 32;
536 optc1->min_v_blank = 3; 536 optc1->min_v_blank = 3;
537 optc1->min_v_blank_interlace = 5; 537 optc1->min_v_blank_interlace = 5;
538 optc1->min_h_sync_width = 8; 538 optc1->min_h_sync_width = 4;// Minimum HSYNC = 8 pixels asked By HW in the first place for no actual reason. Oculus Rift S will not light up with 8 as it's hsyncWidth is 6. Changing it to 4 to fix that issue.
539 optc1->min_v_sync_width = 1; 539 optc1->min_v_sync_width = 1;
540 optc1->comb_opp_id = 0xf; 540 optc1->comb_opp_id = 0xf;
541} 541}
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
index d200bc3cec71..b949e202d6cb 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
@@ -2643,6 +2643,10 @@ static void update_bounding_box(struct dc *dc, struct _vcs_dpi_soc_bounding_box_
2643 2643
2644 if (dc->bb_overrides.min_dcfclk_mhz > 0) 2644 if (dc->bb_overrides.min_dcfclk_mhz > 0)
2645 min_dcfclk = dc->bb_overrides.min_dcfclk_mhz; 2645 min_dcfclk = dc->bb_overrides.min_dcfclk_mhz;
2646 else
2647 // Accounting for SOC/DCF relationship, we can go as high as
2648 // 506Mhz in Vmin. We need to code 507 since SMU will round down to 506.
2649 min_dcfclk = 507;
2646 2650
2647 for (i = 0; i < num_states; i++) { 2651 for (i = 0; i < num_states; i++) {
2648 int min_fclk_required_by_uclk; 2652 int min_fclk_required_by_uclk;
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_vmid.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_vmid.c
index 27679ef6ebe8..96c263223315 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_vmid.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_vmid.c
@@ -23,6 +23,8 @@
23 * 23 *
24 */ 24 */
25 25
26#include <linux/delay.h>
27
26#include "dcn20_vmid.h" 28#include "dcn20_vmid.h"
27#include "reg_helper.h" 29#include "reg_helper.h"
28 30
@@ -36,6 +38,38 @@
36#define FN(reg_name, field_name) \ 38#define FN(reg_name, field_name) \
37 vmid->shifts->field_name, vmid->masks->field_name 39 vmid->shifts->field_name, vmid->masks->field_name
38 40
41static void dcn20_wait_for_vmid_ready(struct dcn20_vmid *vmid)
42{
43 /* According the hardware spec, we need to poll for the lowest
44 * bit of PAGE_TABLE_BASE_ADDR_LO32 = 1 any time a GPUVM
45 * context is updated. We can't use REG_WAIT here since we
46 * don't have a seperate field to wait on.
47 *
48 * TODO: Confirm timeout / poll interval with hardware team
49 */
50
51 int max_times = 10000;
52 int delay_us = 5;
53 int i;
54
55 for (i = 0; i < max_times; ++i) {
56 uint32_t entry_lo32;
57
58 REG_GET(PAGE_TABLE_BASE_ADDR_LO32,
59 VM_CONTEXT0_PAGE_DIRECTORY_ENTRY_LO32,
60 &entry_lo32);
61
62 if (entry_lo32 & 0x1)
63 return;
64
65 udelay(delay_us);
66 }
67
68 /* VM setup timed out */
69 DC_LOG_WARNING("Timeout while waiting for GPUVM context update\n");
70 ASSERT(0);
71}
72
39void dcn20_vmid_setup(struct dcn20_vmid *vmid, const struct dcn_vmid_page_table_config *config) 73void dcn20_vmid_setup(struct dcn20_vmid *vmid, const struct dcn_vmid_page_table_config *config)
40{ 74{
41 REG_SET(PAGE_TABLE_START_ADDR_HI32, 0, 75 REG_SET(PAGE_TABLE_START_ADDR_HI32, 0,
@@ -54,6 +88,9 @@ void dcn20_vmid_setup(struct dcn20_vmid *vmid, const struct dcn_vmid_page_table_
54 88
55 REG_SET(PAGE_TABLE_BASE_ADDR_HI32, 0, 89 REG_SET(PAGE_TABLE_BASE_ADDR_HI32, 0,
56 VM_CONTEXT0_PAGE_DIRECTORY_ENTRY_HI32, (config->page_table_base_addr >> 32) & 0xFFFFFFFF); 90 VM_CONTEXT0_PAGE_DIRECTORY_ENTRY_HI32, (config->page_table_base_addr >> 32) & 0xFFFFFFFF);
91 /* Note: per hardware spec PAGE_TABLE_BASE_ADDR_LO32 must be programmed last in sequence */
57 REG_SET(PAGE_TABLE_BASE_ADDR_LO32, 0, 92 REG_SET(PAGE_TABLE_BASE_ADDR_LO32, 0,
58 VM_CONTEXT0_PAGE_DIRECTORY_ENTRY_LO32, config->page_table_base_addr & 0xFFFFFFFF); 93 VM_CONTEXT0_PAGE_DIRECTORY_ENTRY_LO32, config->page_table_base_addr & 0xFFFFFFFF);
94
95 dcn20_wait_for_vmid_ready(vmid);
59} 96}
diff --git a/drivers/gpu/drm/amd/display/dc/dsc/drm_dsc_dc.c b/drivers/gpu/drm/amd/display/dc/dsc/drm_dsc_dc.c
index 67089765780b..340ef4d41ebd 100644
--- a/drivers/gpu/drm/amd/display/dc/dsc/drm_dsc_dc.c
+++ b/drivers/gpu/drm/amd/display/dc/dsc/drm_dsc_dc.c
@@ -377,6 +377,12 @@ int drm_dsc_compute_rc_parameters(struct drm_dsc_config *vdsc_cfg)
377 vdsc_cfg->rc_bits = (hrd_delay * vdsc_cfg->bits_per_pixel) / 16; 377 vdsc_cfg->rc_bits = (hrd_delay * vdsc_cfg->bits_per_pixel) / 16;
378 vdsc_cfg->initial_dec_delay = hrd_delay - vdsc_cfg->initial_xmit_delay; 378 vdsc_cfg->initial_dec_delay = hrd_delay - vdsc_cfg->initial_xmit_delay;
379 379
380 /* As per DSC spec v1.2a recommendation: */
381 if (vdsc_cfg->native_420)
382 vdsc_cfg->second_line_offset_adj = 512;
383 else
384 vdsc_cfg->second_line_offset_adj = 0;
385
380 return 0; 386 return 0;
381} 387}
382EXPORT_SYMBOL(drm_dsc_compute_rc_parameters); 388EXPORT_SYMBOL(drm_dsc_compute_rc_parameters);
diff --git a/drivers/gpu/drm/amd/display/dc/inc/core_types.h b/drivers/gpu/drm/amd/display/dc/inc/core_types.h
index c89393c19232..a148ffde8b12 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/core_types.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/core_types.h
@@ -212,7 +212,7 @@ struct resource_pool {
212 struct clock_source *clock_sources[MAX_CLOCK_SOURCES]; 212 struct clock_source *clock_sources[MAX_CLOCK_SOURCES];
213 unsigned int clk_src_count; 213 unsigned int clk_src_count;
214 214
215 struct audio *audios[MAX_PIPES]; 215 struct audio *audios[MAX_AUDIOS];
216 unsigned int audio_count; 216 unsigned int audio_count;
217 struct audio_support audio_support; 217 struct audio_support audio_support;
218 218
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h b/drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h
index 959f5b654611..9502478c4a1b 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h
@@ -61,8 +61,8 @@ enum dcn_hubbub_page_table_depth {
61}; 61};
62 62
63enum dcn_hubbub_page_table_block_size { 63enum dcn_hubbub_page_table_block_size {
64 DCN_PAGE_TABLE_BLOCK_SIZE_4KB, 64 DCN_PAGE_TABLE_BLOCK_SIZE_4KB = 0,
65 DCN_PAGE_TABLE_BLOCK_SIZE_64KB 65 DCN_PAGE_TABLE_BLOCK_SIZE_64KB = 4,
66}; 66};
67 67
68struct dcn_hubbub_phys_addr_config { 68struct dcn_hubbub_phys_addr_config {
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h b/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h
index 8759ec03aede..f82365e2d03c 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h
@@ -34,6 +34,7 @@
34 * Data types shared between different Virtual HW blocks 34 * Data types shared between different Virtual HW blocks
35 ******************************************************************************/ 35 ******************************************************************************/
36 36
37#define MAX_AUDIOS 7
37#define MAX_PIPES 6 38#define MAX_PIPES 6
38#if defined(CONFIG_DRM_AMD_DC_DCN2_0) 39#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
39#define MAX_DWB_PIPES 1 40#define MAX_DWB_PIPES 1
diff --git a/drivers/gpu/drm/amd/display/include/dpcd_defs.h b/drivers/gpu/drm/amd/display/include/dpcd_defs.h
index 1c66166d0a94..2c90d1b46c8b 100644
--- a/drivers/gpu/drm/amd/display/include/dpcd_defs.h
+++ b/drivers/gpu/drm/amd/display/include/dpcd_defs.h
@@ -43,7 +43,7 @@ enum dpcd_revision {
43enum dpcd_downstream_port_type { 43enum dpcd_downstream_port_type {
44 DOWNSTREAM_DP = 0, 44 DOWNSTREAM_DP = 0,
45 DOWNSTREAM_VGA, 45 DOWNSTREAM_VGA,
46 DOWNSTREAM_DVI_HDMI, 46 DOWNSTREAM_DVI_HDMI_DP_PLUS_PLUS,/* DVI, HDMI, DP++ */
47 DOWNSTREAM_NONDDC /* has no EDID (TV,CV) */ 47 DOWNSTREAM_NONDDC /* has no EDID (TV,CV) */
48}; 48};
49 49
diff --git a/drivers/gpu/drm/amd/include/kgd_pp_interface.h b/drivers/gpu/drm/amd/include/kgd_pp_interface.h
index 9f661bf96ed0..5b1ebb7f995a 100644
--- a/drivers/gpu/drm/amd/include/kgd_pp_interface.h
+++ b/drivers/gpu/drm/amd/include/kgd_pp_interface.h
@@ -123,6 +123,7 @@ enum amd_pp_sensors {
123 AMDGPU_PP_SENSOR_ENABLED_SMC_FEATURES_MASK, 123 AMDGPU_PP_SENSOR_ENABLED_SMC_FEATURES_MASK,
124 AMDGPU_PP_SENSOR_MIN_FAN_RPM, 124 AMDGPU_PP_SENSOR_MIN_FAN_RPM,
125 AMDGPU_PP_SENSOR_MAX_FAN_RPM, 125 AMDGPU_PP_SENSOR_MAX_FAN_RPM,
126 AMDGPU_PP_SENSOR_VCN_POWER_STATE,
126}; 127};
127 128
128enum amd_pp_task { 129enum amd_pp_task {
diff --git a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
index f1565c448de5..8a3eadeebdcb 100644
--- a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
+++ b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
@@ -137,12 +137,37 @@ int smu_get_dpm_freq_range(struct smu_context *smu, enum smu_clk_type clk_type,
137{ 137{
138 int ret = 0, clk_id = 0; 138 int ret = 0, clk_id = 0;
139 uint32_t param = 0; 139 uint32_t param = 0;
140 uint32_t clock_limit;
140 141
141 if (!min && !max) 142 if (!min && !max)
142 return -EINVAL; 143 return -EINVAL;
143 144
144 if (!smu_clk_dpm_is_enabled(smu, clk_type)) 145 if (!smu_clk_dpm_is_enabled(smu, clk_type)) {
146 switch (clk_type) {
147 case SMU_MCLK:
148 case SMU_UCLK:
149 clock_limit = smu->smu_table.boot_values.uclk;
150 break;
151 case SMU_GFXCLK:
152 case SMU_SCLK:
153 clock_limit = smu->smu_table.boot_values.gfxclk;
154 break;
155 case SMU_SOCCLK:
156 clock_limit = smu->smu_table.boot_values.socclk;
157 break;
158 default:
159 clock_limit = 0;
160 break;
161 }
162
163 /* clock in Mhz unit */
164 if (min)
165 *min = clock_limit / 100;
166 if (max)
167 *max = clock_limit / 100;
168
145 return 0; 169 return 0;
170 }
146 171
147 mutex_lock(&smu->mutex); 172 mutex_lock(&smu->mutex);
148 clk_id = smu_clk_get_index(smu, clk_type); 173 clk_id = smu_clk_get_index(smu, clk_type);
@@ -281,7 +306,8 @@ int smu_get_power_num_states(struct smu_context *smu,
281 306
282 /* not support power state */ 307 /* not support power state */
283 memset(state_info, 0, sizeof(struct pp_states_info)); 308 memset(state_info, 0, sizeof(struct pp_states_info));
284 state_info->nums = 0; 309 state_info->nums = 1;
310 state_info->states[0] = POWER_STATE_TYPE_DEFAULT;
285 311
286 return 0; 312 return 0;
287} 313}
@@ -289,6 +315,8 @@ int smu_get_power_num_states(struct smu_context *smu,
289int smu_common_read_sensor(struct smu_context *smu, enum amd_pp_sensors sensor, 315int smu_common_read_sensor(struct smu_context *smu, enum amd_pp_sensors sensor,
290 void *data, uint32_t *size) 316 void *data, uint32_t *size)
291{ 317{
318 struct smu_power_context *smu_power = &smu->smu_power;
319 struct smu_power_gate *power_gate = &smu_power->power_gate;
292 int ret = 0; 320 int ret = 0;
293 321
294 switch (sensor) { 322 switch (sensor) {
@@ -312,6 +340,10 @@ int smu_common_read_sensor(struct smu_context *smu, enum amd_pp_sensors sensor,
312 *(uint32_t *)data = smu_feature_is_enabled(smu, SMU_FEATURE_DPM_VCE_BIT) ? 1 : 0; 340 *(uint32_t *)data = smu_feature_is_enabled(smu, SMU_FEATURE_DPM_VCE_BIT) ? 1 : 0;
313 *size = 4; 341 *size = 4;
314 break; 342 break;
343 case AMDGPU_PP_SENSOR_VCN_POWER_STATE:
344 *(uint32_t *)data = power_gate->vcn_gated ? 0 : 1;
345 *size = 4;
346 break;
315 default: 347 default:
316 ret = -EINVAL; 348 ret = -EINVAL;
317 break; 349 break;
@@ -698,6 +730,12 @@ static int smu_sw_init(void *handle)
698 return ret; 730 return ret;
699 } 731 }
700 732
733 ret = smu_register_irq_handler(smu);
734 if (ret) {
735 pr_err("Failed to register smc irq handler!\n");
736 return ret;
737 }
738
701 return 0; 739 return 0;
702} 740}
703 741
@@ -707,6 +745,9 @@ static int smu_sw_fini(void *handle)
707 struct smu_context *smu = &adev->smu; 745 struct smu_context *smu = &adev->smu;
708 int ret; 746 int ret;
709 747
748 kfree(smu->irq_source);
749 smu->irq_source = NULL;
750
710 ret = smu_smc_table_sw_fini(smu); 751 ret = smu_smc_table_sw_fini(smu);
711 if (ret) { 752 if (ret) {
712 pr_err("Failed to sw fini smc table!\n"); 753 pr_err("Failed to sw fini smc table!\n");
@@ -1063,10 +1104,6 @@ static int smu_hw_init(void *handle)
1063 if (ret) 1104 if (ret)
1064 goto failed; 1105 goto failed;
1065 1106
1066 ret = smu_register_irq_handler(smu);
1067 if (ret)
1068 goto failed;
1069
1070 if (!smu->pm_enabled) 1107 if (!smu->pm_enabled)
1071 adev->pm.dpm_enabled = false; 1108 adev->pm.dpm_enabled = false;
1072 else 1109 else
@@ -1096,9 +1133,6 @@ static int smu_hw_fini(void *handle)
1096 kfree(table_context->overdrive_table); 1133 kfree(table_context->overdrive_table);
1097 table_context->overdrive_table = NULL; 1134 table_context->overdrive_table = NULL;
1098 1135
1099 kfree(smu->irq_source);
1100 smu->irq_source = NULL;
1101
1102 ret = smu_fini_fb_allocations(smu); 1136 ret = smu_fini_fb_allocations(smu);
1103 if (ret) 1137 if (ret)
1104 return ret; 1138 return ret;
@@ -1349,13 +1383,49 @@ static int smu_enable_umd_pstate(void *handle,
1349 return 0; 1383 return 0;
1350} 1384}
1351 1385
1386static int smu_default_set_performance_level(struct smu_context *smu, enum amd_dpm_forced_level level)
1387{
1388 int ret = 0;
1389 uint32_t sclk_mask, mclk_mask, soc_mask;
1390
1391 switch (level) {
1392 case AMD_DPM_FORCED_LEVEL_HIGH:
1393 ret = smu_force_dpm_limit_value(smu, true);
1394 break;
1395 case AMD_DPM_FORCED_LEVEL_LOW:
1396 ret = smu_force_dpm_limit_value(smu, false);
1397 break;
1398 case AMD_DPM_FORCED_LEVEL_AUTO:
1399 case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD:
1400 ret = smu_unforce_dpm_levels(smu);
1401 break;
1402 case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK:
1403 case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK:
1404 case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK:
1405 ret = smu_get_profiling_clk_mask(smu, level,
1406 &sclk_mask,
1407 &mclk_mask,
1408 &soc_mask);
1409 if (ret)
1410 return ret;
1411 smu_force_clk_levels(smu, SMU_SCLK, 1 << sclk_mask);
1412 smu_force_clk_levels(smu, SMU_MCLK, 1 << mclk_mask);
1413 smu_force_clk_levels(smu, SMU_SOCCLK, 1 << soc_mask);
1414 break;
1415 case AMD_DPM_FORCED_LEVEL_MANUAL:
1416 case AMD_DPM_FORCED_LEVEL_PROFILE_EXIT:
1417 default:
1418 break;
1419 }
1420 return ret;
1421}
1422
1352int smu_adjust_power_state_dynamic(struct smu_context *smu, 1423int smu_adjust_power_state_dynamic(struct smu_context *smu,
1353 enum amd_dpm_forced_level level, 1424 enum amd_dpm_forced_level level,
1354 bool skip_display_settings) 1425 bool skip_display_settings)
1355{ 1426{
1356 int ret = 0; 1427 int ret = 0;
1357 int index = 0; 1428 int index = 0;
1358 uint32_t sclk_mask, mclk_mask, soc_mask;
1359 long workload; 1429 long workload;
1360 struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm); 1430 struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
1361 1431
@@ -1386,39 +1456,10 @@ int smu_adjust_power_state_dynamic(struct smu_context *smu,
1386 } 1456 }
1387 1457
1388 if (smu_dpm_ctx->dpm_level != level) { 1458 if (smu_dpm_ctx->dpm_level != level) {
1389 switch (level) { 1459 ret = smu_asic_set_performance_level(smu, level);
1390 case AMD_DPM_FORCED_LEVEL_HIGH: 1460 if (ret) {
1391 ret = smu_force_dpm_limit_value(smu, true); 1461 ret = smu_default_set_performance_level(smu, level);
1392 break;
1393 case AMD_DPM_FORCED_LEVEL_LOW:
1394 ret = smu_force_dpm_limit_value(smu, false);
1395 break;
1396
1397 case AMD_DPM_FORCED_LEVEL_AUTO:
1398 case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD:
1399 ret = smu_unforce_dpm_levels(smu);
1400 break;
1401
1402 case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK:
1403 case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK:
1404 case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK:
1405 ret = smu_get_profiling_clk_mask(smu, level,
1406 &sclk_mask,
1407 &mclk_mask,
1408 &soc_mask);
1409 if (ret)
1410 return ret;
1411 smu_force_clk_levels(smu, SMU_SCLK, 1 << sclk_mask);
1412 smu_force_clk_levels(smu, SMU_MCLK, 1 << mclk_mask);
1413 smu_force_clk_levels(smu, SMU_SOCCLK, 1 << soc_mask);
1414 break;
1415
1416 case AMD_DPM_FORCED_LEVEL_MANUAL:
1417 case AMD_DPM_FORCED_LEVEL_PROFILE_EXIT:
1418 default:
1419 break;
1420 } 1462 }
1421
1422 if (!ret) 1463 if (!ret)
1423 smu_dpm_ctx->dpm_level = level; 1464 smu_dpm_ctx->dpm_level = level;
1424 } 1465 }
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c
index e32ae9d3373c..18e780f566fa 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c
@@ -1111,6 +1111,7 @@ static int smu10_thermal_get_temperature(struct pp_hwmgr *hwmgr)
1111static int smu10_read_sensor(struct pp_hwmgr *hwmgr, int idx, 1111static int smu10_read_sensor(struct pp_hwmgr *hwmgr, int idx,
1112 void *value, int *size) 1112 void *value, int *size)
1113{ 1113{
1114 struct smu10_hwmgr *smu10_data = (struct smu10_hwmgr *)(hwmgr->backend);
1114 uint32_t sclk, mclk; 1115 uint32_t sclk, mclk;
1115 int ret = 0; 1116 int ret = 0;
1116 1117
@@ -1132,6 +1133,10 @@ static int smu10_read_sensor(struct pp_hwmgr *hwmgr, int idx,
1132 case AMDGPU_PP_SENSOR_GPU_TEMP: 1133 case AMDGPU_PP_SENSOR_GPU_TEMP:
1133 *((uint32_t *)value) = smu10_thermal_get_temperature(hwmgr); 1134 *((uint32_t *)value) = smu10_thermal_get_temperature(hwmgr);
1134 break; 1135 break;
1136 case AMDGPU_PP_SENSOR_VCN_POWER_STATE:
1137 *(uint32_t *)value = smu10_data->vcn_power_gated ? 0 : 1;
1138 *size = 4;
1139 break;
1135 default: 1140 default:
1136 ret = -EINVAL; 1141 ret = -EINVAL;
1137 break; 1142 break;
@@ -1175,18 +1180,22 @@ static int smu10_powergate_sdma(struct pp_hwmgr *hwmgr, bool gate)
1175 1180
1176static void smu10_powergate_vcn(struct pp_hwmgr *hwmgr, bool bgate) 1181static void smu10_powergate_vcn(struct pp_hwmgr *hwmgr, bool bgate)
1177{ 1182{
1183 struct smu10_hwmgr *smu10_data = (struct smu10_hwmgr *)(hwmgr->backend);
1184
1178 if (bgate) { 1185 if (bgate) {
1179 amdgpu_device_ip_set_powergating_state(hwmgr->adev, 1186 amdgpu_device_ip_set_powergating_state(hwmgr->adev,
1180 AMD_IP_BLOCK_TYPE_VCN, 1187 AMD_IP_BLOCK_TYPE_VCN,
1181 AMD_PG_STATE_GATE); 1188 AMD_PG_STATE_GATE);
1182 smum_send_msg_to_smc_with_parameter(hwmgr, 1189 smum_send_msg_to_smc_with_parameter(hwmgr,
1183 PPSMC_MSG_PowerDownVcn, 0); 1190 PPSMC_MSG_PowerDownVcn, 0);
1191 smu10_data->vcn_power_gated = true;
1184 } else { 1192 } else {
1185 smum_send_msg_to_smc_with_parameter(hwmgr, 1193 smum_send_msg_to_smc_with_parameter(hwmgr,
1186 PPSMC_MSG_PowerUpVcn, 0); 1194 PPSMC_MSG_PowerUpVcn, 0);
1187 amdgpu_device_ip_set_powergating_state(hwmgr->adev, 1195 amdgpu_device_ip_set_powergating_state(hwmgr->adev,
1188 AMD_IP_BLOCK_TYPE_VCN, 1196 AMD_IP_BLOCK_TYPE_VCN,
1189 AMD_PG_STATE_UNGATE); 1197 AMD_PG_STATE_UNGATE);
1198 smu10_data->vcn_power_gated = false;
1190 } 1199 }
1191} 1200}
1192 1201
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c
index f27c6fbb192e..90c4e87ac5ad 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c
@@ -2101,7 +2101,11 @@ static int vega20_get_gpu_power(struct pp_hwmgr *hwmgr,
2101 if (ret) 2101 if (ret)
2102 return ret; 2102 return ret;
2103 2103
2104 *query = metrics_table.CurrSocketPower << 8; 2104 /* For the 40.46 release, they changed the value name */
2105 if (hwmgr->smu_version == 0x282e00)
2106 *query = metrics_table.AverageSocketPower << 8;
2107 else
2108 *query = metrics_table.CurrSocketPower << 8;
2105 2109
2106 return ret; 2110 return ret;
2107} 2111}
@@ -2349,12 +2353,16 @@ static int vega20_force_dpm_highest(struct pp_hwmgr *hwmgr)
2349 data->dpm_table.soc_table.dpm_state.soft_max_level = 2353 data->dpm_table.soc_table.dpm_state.soft_max_level =
2350 data->dpm_table.soc_table.dpm_levels[soft_level].value; 2354 data->dpm_table.soc_table.dpm_levels[soft_level].value;
2351 2355
2352 ret = vega20_upload_dpm_min_level(hwmgr, 0xFFFFFFFF); 2356 ret = vega20_upload_dpm_min_level(hwmgr, FEATURE_DPM_GFXCLK_MASK |
2357 FEATURE_DPM_UCLK_MASK |
2358 FEATURE_DPM_SOCCLK_MASK);
2353 PP_ASSERT_WITH_CODE(!ret, 2359 PP_ASSERT_WITH_CODE(!ret,
2354 "Failed to upload boot level to highest!", 2360 "Failed to upload boot level to highest!",
2355 return ret); 2361 return ret);
2356 2362
2357 ret = vega20_upload_dpm_max_level(hwmgr, 0xFFFFFFFF); 2363 ret = vega20_upload_dpm_max_level(hwmgr, FEATURE_DPM_GFXCLK_MASK |
2364 FEATURE_DPM_UCLK_MASK |
2365 FEATURE_DPM_SOCCLK_MASK);
2358 PP_ASSERT_WITH_CODE(!ret, 2366 PP_ASSERT_WITH_CODE(!ret,
2359 "Failed to upload dpm max level to highest!", 2367 "Failed to upload dpm max level to highest!",
2360 return ret); 2368 return ret);
@@ -2387,12 +2395,16 @@ static int vega20_force_dpm_lowest(struct pp_hwmgr *hwmgr)
2387 data->dpm_table.soc_table.dpm_state.soft_max_level = 2395 data->dpm_table.soc_table.dpm_state.soft_max_level =
2388 data->dpm_table.soc_table.dpm_levels[soft_level].value; 2396 data->dpm_table.soc_table.dpm_levels[soft_level].value;
2389 2397
2390 ret = vega20_upload_dpm_min_level(hwmgr, 0xFFFFFFFF); 2398 ret = vega20_upload_dpm_min_level(hwmgr, FEATURE_DPM_GFXCLK_MASK |
2399 FEATURE_DPM_UCLK_MASK |
2400 FEATURE_DPM_SOCCLK_MASK);
2391 PP_ASSERT_WITH_CODE(!ret, 2401 PP_ASSERT_WITH_CODE(!ret,
2392 "Failed to upload boot level to highest!", 2402 "Failed to upload boot level to highest!",
2393 return ret); 2403 return ret);
2394 2404
2395 ret = vega20_upload_dpm_max_level(hwmgr, 0xFFFFFFFF); 2405 ret = vega20_upload_dpm_max_level(hwmgr, FEATURE_DPM_GFXCLK_MASK |
2406 FEATURE_DPM_UCLK_MASK |
2407 FEATURE_DPM_SOCCLK_MASK);
2396 PP_ASSERT_WITH_CODE(!ret, 2408 PP_ASSERT_WITH_CODE(!ret,
2397 "Failed to upload dpm max level to highest!", 2409 "Failed to upload dpm max level to highest!",
2398 return ret); 2410 return ret);
@@ -2403,14 +2415,54 @@ static int vega20_force_dpm_lowest(struct pp_hwmgr *hwmgr)
2403 2415
2404static int vega20_unforce_dpm_levels(struct pp_hwmgr *hwmgr) 2416static int vega20_unforce_dpm_levels(struct pp_hwmgr *hwmgr)
2405{ 2417{
2418 struct vega20_hwmgr *data =
2419 (struct vega20_hwmgr *)(hwmgr->backend);
2420 uint32_t soft_min_level, soft_max_level;
2406 int ret = 0; 2421 int ret = 0;
2407 2422
2408 ret = vega20_upload_dpm_min_level(hwmgr, 0xFFFFFFFF); 2423 /* gfxclk soft min/max settings */
2424 soft_min_level =
2425 vega20_find_lowest_dpm_level(&(data->dpm_table.gfx_table));
2426 soft_max_level =
2427 vega20_find_highest_dpm_level(&(data->dpm_table.gfx_table));
2428
2429 data->dpm_table.gfx_table.dpm_state.soft_min_level =
2430 data->dpm_table.gfx_table.dpm_levels[soft_min_level].value;
2431 data->dpm_table.gfx_table.dpm_state.soft_max_level =
2432 data->dpm_table.gfx_table.dpm_levels[soft_max_level].value;
2433
2434 /* uclk soft min/max settings */
2435 soft_min_level =
2436 vega20_find_lowest_dpm_level(&(data->dpm_table.mem_table));
2437 soft_max_level =
2438 vega20_find_highest_dpm_level(&(data->dpm_table.mem_table));
2439
2440 data->dpm_table.mem_table.dpm_state.soft_min_level =
2441 data->dpm_table.mem_table.dpm_levels[soft_min_level].value;
2442 data->dpm_table.mem_table.dpm_state.soft_max_level =
2443 data->dpm_table.mem_table.dpm_levels[soft_max_level].value;
2444
2445 /* socclk soft min/max settings */
2446 soft_min_level =
2447 vega20_find_lowest_dpm_level(&(data->dpm_table.soc_table));
2448 soft_max_level =
2449 vega20_find_highest_dpm_level(&(data->dpm_table.soc_table));
2450
2451 data->dpm_table.soc_table.dpm_state.soft_min_level =
2452 data->dpm_table.soc_table.dpm_levels[soft_min_level].value;
2453 data->dpm_table.soc_table.dpm_state.soft_max_level =
2454 data->dpm_table.soc_table.dpm_levels[soft_max_level].value;
2455
2456 ret = vega20_upload_dpm_min_level(hwmgr, FEATURE_DPM_GFXCLK_MASK |
2457 FEATURE_DPM_UCLK_MASK |
2458 FEATURE_DPM_SOCCLK_MASK);
2409 PP_ASSERT_WITH_CODE(!ret, 2459 PP_ASSERT_WITH_CODE(!ret,
2410 "Failed to upload DPM Bootup Levels!", 2460 "Failed to upload DPM Bootup Levels!",
2411 return ret); 2461 return ret);
2412 2462
2413 ret = vega20_upload_dpm_max_level(hwmgr, 0xFFFFFFFF); 2463 ret = vega20_upload_dpm_max_level(hwmgr, FEATURE_DPM_GFXCLK_MASK |
2464 FEATURE_DPM_UCLK_MASK |
2465 FEATURE_DPM_SOCCLK_MASK);
2414 PP_ASSERT_WITH_CODE(!ret, 2466 PP_ASSERT_WITH_CODE(!ret,
2415 "Failed to upload DPM Max Levels!", 2467 "Failed to upload DPM Max Levels!",
2416 return ret); 2468 return ret);
diff --git a/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h b/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h
index 1af992fb0bde..a78b2e295895 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h
@@ -429,7 +429,6 @@ struct smu_table_context
429 struct smu_table *tables; 429 struct smu_table *tables;
430 uint32_t table_count; 430 uint32_t table_count;
431 struct smu_table memory_pool; 431 struct smu_table memory_pool;
432 uint16_t software_shutdown_temp;
433 uint8_t thermal_controller_type; 432 uint8_t thermal_controller_type;
434 uint16_t TDPODLimit; 433 uint16_t TDPODLimit;
435 434
@@ -452,6 +451,7 @@ struct smu_dpm_context {
452struct smu_power_gate { 451struct smu_power_gate {
453 bool uvd_gated; 452 bool uvd_gated;
454 bool vce_gated; 453 bool vce_gated;
454 bool vcn_gated;
455}; 455};
456 456
457struct smu_power_context { 457struct smu_power_context {
@@ -613,6 +613,7 @@ struct pptable_funcs {
613 int (*tables_init)(struct smu_context *smu, struct smu_table *tables); 613 int (*tables_init)(struct smu_context *smu, struct smu_table *tables);
614 int (*set_thermal_fan_table)(struct smu_context *smu); 614 int (*set_thermal_fan_table)(struct smu_context *smu);
615 int (*get_fan_speed_percent)(struct smu_context *smu, uint32_t *speed); 615 int (*get_fan_speed_percent)(struct smu_context *smu, uint32_t *speed);
616 int (*get_fan_speed_rpm)(struct smu_context *smu, uint32_t *speed);
616 int (*set_watermarks_table)(struct smu_context *smu, void *watermarks, 617 int (*set_watermarks_table)(struct smu_context *smu, void *watermarks,
617 struct dm_pp_wm_sets_with_clock_ranges_soc15 *clock_ranges); 618 struct dm_pp_wm_sets_with_clock_ranges_soc15 *clock_ranges);
618 int (*get_current_clk_freq_by_table)(struct smu_context *smu, 619 int (*get_current_clk_freq_by_table)(struct smu_context *smu,
@@ -621,6 +622,7 @@ struct pptable_funcs {
621 int (*get_thermal_temperature_range)(struct smu_context *smu, struct smu_temperature_range *range); 622 int (*get_thermal_temperature_range)(struct smu_context *smu, struct smu_temperature_range *range);
622 int (*get_uclk_dpm_states)(struct smu_context *smu, uint32_t *clocks_in_khz, uint32_t *num_states); 623 int (*get_uclk_dpm_states)(struct smu_context *smu, uint32_t *clocks_in_khz, uint32_t *num_states);
623 int (*set_default_od_settings)(struct smu_context *smu, bool initialize); 624 int (*set_default_od_settings)(struct smu_context *smu, bool initialize);
625 int (*set_performance_level)(struct smu_context *smu, enum amd_dpm_forced_level level);
624}; 626};
625 627
626struct smu_funcs 628struct smu_funcs
@@ -685,7 +687,6 @@ struct smu_funcs
685 int (*set_watermarks_for_clock_ranges)(struct smu_context *smu, 687 int (*set_watermarks_for_clock_ranges)(struct smu_context *smu,
686 struct dm_pp_wm_sets_with_clock_ranges_soc15 *clock_ranges); 688 struct dm_pp_wm_sets_with_clock_ranges_soc15 *clock_ranges);
687 int (*conv_power_profile_to_pplib_workload)(int power_profile); 689 int (*conv_power_profile_to_pplib_workload)(int power_profile);
688 int (*get_current_rpm)(struct smu_context *smu, uint32_t *speed);
689 uint32_t (*get_fan_control_mode)(struct smu_context *smu); 690 uint32_t (*get_fan_control_mode)(struct smu_context *smu);
690 int (*set_fan_control_mode)(struct smu_context *smu, uint32_t mode); 691 int (*set_fan_control_mode)(struct smu_context *smu, uint32_t mode);
691 int (*set_fan_speed_percent)(struct smu_context *smu, uint32_t speed); 692 int (*set_fan_speed_percent)(struct smu_context *smu, uint32_t speed);
@@ -751,8 +752,6 @@ struct smu_funcs
751 ((smu)->funcs->init_max_sustainable_clocks ? (smu)->funcs->init_max_sustainable_clocks((smu)) : 0) 752 ((smu)->funcs->init_max_sustainable_clocks ? (smu)->funcs->init_max_sustainable_clocks((smu)) : 0)
752#define smu_set_default_od_settings(smu, initialize) \ 753#define smu_set_default_od_settings(smu, initialize) \
753 ((smu)->ppt_funcs->set_default_od_settings ? (smu)->ppt_funcs->set_default_od_settings((smu), (initialize)) : 0) 754 ((smu)->ppt_funcs->set_default_od_settings ? (smu)->ppt_funcs->set_default_od_settings((smu), (initialize)) : 0)
754#define smu_get_current_rpm(smu, speed) \
755 ((smu)->funcs->get_current_rpm ? (smu)->funcs->get_current_rpm((smu), (speed)) : 0)
756#define smu_set_fan_speed_rpm(smu, speed) \ 755#define smu_set_fan_speed_rpm(smu, speed) \
757 ((smu)->funcs->set_fan_speed_rpm ? (smu)->funcs->set_fan_speed_rpm((smu), (speed)) : 0) 756 ((smu)->funcs->set_fan_speed_rpm ? (smu)->funcs->set_fan_speed_rpm((smu), (speed)) : 0)
758#define smu_send_smc_msg(smu, msg) \ 757#define smu_send_smc_msg(smu, msg) \
@@ -841,6 +840,8 @@ struct smu_funcs
841 ((smu)->ppt_funcs->get_fan_speed_percent ? (smu)->ppt_funcs->get_fan_speed_percent((smu), (speed)) : 0) 840 ((smu)->ppt_funcs->get_fan_speed_percent ? (smu)->ppt_funcs->get_fan_speed_percent((smu), (speed)) : 0)
842#define smu_set_fan_speed_percent(smu, speed) \ 841#define smu_set_fan_speed_percent(smu, speed) \
843 ((smu)->funcs->set_fan_speed_percent ? (smu)->funcs->set_fan_speed_percent((smu), (speed)) : 0) 842 ((smu)->funcs->set_fan_speed_percent ? (smu)->funcs->set_fan_speed_percent((smu), (speed)) : 0)
843#define smu_get_fan_speed_rpm(smu, speed) \
844 ((smu)->ppt_funcs->get_fan_speed_rpm ? (smu)->ppt_funcs->get_fan_speed_rpm((smu), (speed)) : 0)
844 845
845#define smu_msg_get_index(smu, msg) \ 846#define smu_msg_get_index(smu, msg) \
846 ((smu)->ppt_funcs? ((smu)->ppt_funcs->get_smu_msg_index? (smu)->ppt_funcs->get_smu_msg_index((smu), (msg)) : -EINVAL) : -EINVAL) 847 ((smu)->ppt_funcs? ((smu)->ppt_funcs->get_smu_msg_index? (smu)->ppt_funcs->get_smu_msg_index((smu), (msg)) : -EINVAL) : -EINVAL)
@@ -906,8 +907,6 @@ struct smu_funcs
906 ((smu)->funcs->register_irq_handler ? (smu)->funcs->register_irq_handler(smu) : 0) 907 ((smu)->funcs->register_irq_handler ? (smu)->funcs->register_irq_handler(smu) : 0)
907#define smu_set_azalia_d3_pme(smu) \ 908#define smu_set_azalia_d3_pme(smu) \
908 ((smu)->funcs->set_azalia_d3_pme ? (smu)->funcs->set_azalia_d3_pme((smu)) : 0) 909 ((smu)->funcs->set_azalia_d3_pme ? (smu)->funcs->set_azalia_d3_pme((smu)) : 0)
909#define smu_get_uclk_dpm_states(smu, clocks_in_khz, num_states) \
910 ((smu)->ppt_funcs->get_uclk_dpm_states ? (smu)->ppt_funcs->get_uclk_dpm_states((smu), (clocks_in_khz), (num_states)) : 0)
911#define smu_get_max_sustainable_clocks_by_dc(smu, max_clocks) \ 910#define smu_get_max_sustainable_clocks_by_dc(smu, max_clocks) \
912 ((smu)->funcs->get_max_sustainable_clocks_by_dc ? (smu)->funcs->get_max_sustainable_clocks_by_dc((smu), (max_clocks)) : 0) 911 ((smu)->funcs->get_max_sustainable_clocks_by_dc ? (smu)->funcs->get_max_sustainable_clocks_by_dc((smu), (max_clocks)) : 0)
913#define smu_get_uclk_dpm_states(smu, clocks_in_khz, num_states) \ 912#define smu_get_uclk_dpm_states(smu, clocks_in_khz, num_states) \
@@ -918,6 +917,9 @@ struct smu_funcs
918 ((smu)->funcs->baco_get_state? (smu)->funcs->baco_get_state((smu), (state)) : 0) 917 ((smu)->funcs->baco_get_state? (smu)->funcs->baco_get_state((smu), (state)) : 0)
919#define smu_baco_reset(smu) \ 918#define smu_baco_reset(smu) \
920 ((smu)->funcs->baco_reset? (smu)->funcs->baco_reset((smu)) : 0) 919 ((smu)->funcs->baco_reset? (smu)->funcs->baco_reset((smu)) : 0)
920#define smu_asic_set_performance_level(smu, level) \
921 ((smu)->ppt_funcs->set_performance_level? (smu)->ppt_funcs->set_performance_level((smu), (level)) : -EINVAL);
922
921 923
922extern int smu_get_atom_data_table(struct smu_context *smu, uint32_t table, 924extern int smu_get_atom_data_table(struct smu_context *smu, uint32_t table,
923 uint16_t *size, uint8_t *frev, uint8_t *crev, 925 uint16_t *size, uint8_t *frev, uint8_t *crev,
diff --git a/drivers/gpu/drm/amd/powerplay/navi10_ppt.c b/drivers/gpu/drm/amd/powerplay/navi10_ppt.c
index 2dae0ae0829e..b81c7e715dc9 100644
--- a/drivers/gpu/drm/amd/powerplay/navi10_ppt.c
+++ b/drivers/gpu/drm/amd/powerplay/navi10_ppt.c
@@ -23,6 +23,7 @@
23 23
24#include "pp_debug.h" 24#include "pp_debug.h"
25#include <linux/firmware.h> 25#include <linux/firmware.h>
26#include <linux/pci.h>
26#include "amdgpu.h" 27#include "amdgpu.h"
27#include "amdgpu_smu.h" 28#include "amdgpu_smu.h"
28#include "atomfirmware.h" 29#include "atomfirmware.h"
@@ -501,6 +502,8 @@ static int navi10_store_powerplay_table(struct smu_context *smu)
501 502
502static int navi10_tables_init(struct smu_context *smu, struct smu_table *tables) 503static int navi10_tables_init(struct smu_context *smu, struct smu_table *tables)
503{ 504{
505 struct smu_table_context *smu_table = &smu->smu_table;
506
504 SMU_TABLE_INIT(tables, SMU_TABLE_PPTABLE, sizeof(PPTable_t), 507 SMU_TABLE_INIT(tables, SMU_TABLE_PPTABLE, sizeof(PPTable_t),
505 PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM); 508 PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
506 SMU_TABLE_INIT(tables, SMU_TABLE_WATERMARKS, sizeof(Watermarks_t), 509 SMU_TABLE_INIT(tables, SMU_TABLE_WATERMARKS, sizeof(Watermarks_t),
@@ -515,9 +518,35 @@ static int navi10_tables_init(struct smu_context *smu, struct smu_table *tables)
515 sizeof(DpmActivityMonitorCoeffInt_t), PAGE_SIZE, 518 sizeof(DpmActivityMonitorCoeffInt_t), PAGE_SIZE,
516 AMDGPU_GEM_DOMAIN_VRAM); 519 AMDGPU_GEM_DOMAIN_VRAM);
517 520
521 smu_table->metrics_table = kzalloc(sizeof(SmuMetrics_t), GFP_KERNEL);
522 if (!smu_table->metrics_table)
523 return -ENOMEM;
524 smu_table->metrics_time = 0;
525
518 return 0; 526 return 0;
519} 527}
520 528
529static int navi10_get_metrics_table(struct smu_context *smu,
530 SmuMetrics_t *metrics_table)
531{
532 struct smu_table_context *smu_table= &smu->smu_table;
533 int ret = 0;
534
535 if (!smu_table->metrics_time || time_after(jiffies, smu_table->metrics_time + HZ / 1000)) {
536 ret = smu_update_table(smu, SMU_TABLE_SMU_METRICS, 0,
537 (void *)smu_table->metrics_table, false);
538 if (ret) {
539 pr_info("Failed to export SMU metrics table!\n");
540 return ret;
541 }
542 smu_table->metrics_time = jiffies;
543 }
544
545 memcpy(metrics_table, smu_table->metrics_table, sizeof(SmuMetrics_t));
546
547 return ret;
548}
549
521static int navi10_allocate_dpm_context(struct smu_context *smu) 550static int navi10_allocate_dpm_context(struct smu_context *smu)
522{ 551{
523 struct smu_dpm_context *smu_dpm = &smu->smu_dpm; 552 struct smu_dpm_context *smu_dpm = &smu->smu_dpm;
@@ -576,44 +605,38 @@ static int navi10_set_default_dpm_table(struct smu_context *smu)
576 605
577static int navi10_dpm_set_uvd_enable(struct smu_context *smu, bool enable) 606static int navi10_dpm_set_uvd_enable(struct smu_context *smu, bool enable)
578{ 607{
579 int ret = 0;
580 struct smu_power_context *smu_power = &smu->smu_power; 608 struct smu_power_context *smu_power = &smu->smu_power;
581 struct smu_power_gate *power_gate = &smu_power->power_gate; 609 struct smu_power_gate *power_gate = &smu_power->power_gate;
610 int ret = 0;
582 611
583 if (enable && power_gate->uvd_gated) { 612 if (enable) {
584 if (smu_feature_is_enabled(smu, SMU_FEATURE_DPM_UVD_BIT)) { 613 /* vcn dpm on is a prerequisite for vcn power gate messages */
614 if (smu_feature_is_enabled(smu, SMU_FEATURE_VCN_PG_BIT)) {
585 ret = smu_send_smc_msg_with_param(smu, SMU_MSG_PowerUpVcn, 1); 615 ret = smu_send_smc_msg_with_param(smu, SMU_MSG_PowerUpVcn, 1);
586 if (ret) 616 if (ret)
587 return ret; 617 return ret;
588 } 618 }
589 power_gate->uvd_gated = false; 619 power_gate->vcn_gated = false;
590 } else { 620 } else {
591 if (!enable && !power_gate->uvd_gated) { 621 if (smu_feature_is_enabled(smu, SMU_FEATURE_VCN_PG_BIT)) {
592 if (smu_feature_is_enabled(smu, SMU_FEATURE_DPM_UVD_BIT)) { 622 ret = smu_send_smc_msg(smu, SMU_MSG_PowerDownVcn);
593 ret = smu_send_smc_msg(smu, SMU_MSG_PowerDownVcn); 623 if (ret)
594 if (ret) 624 return ret;
595 return ret;
596 }
597 power_gate->uvd_gated = true;
598 } 625 }
626 power_gate->vcn_gated = true;
599 } 627 }
600 628
601 return 0; 629 return ret;
602} 630}
603 631
604static int navi10_get_current_clk_freq_by_table(struct smu_context *smu, 632static int navi10_get_current_clk_freq_by_table(struct smu_context *smu,
605 enum smu_clk_type clk_type, 633 enum smu_clk_type clk_type,
606 uint32_t *value) 634 uint32_t *value)
607{ 635{
608 static SmuMetrics_t metrics;
609 int ret = 0, clk_id = 0; 636 int ret = 0, clk_id = 0;
637 SmuMetrics_t metrics;
610 638
611 if (!value) 639 ret = navi10_get_metrics_table(smu, &metrics);
612 return -EINVAL;
613
614 memset(&metrics, 0, sizeof(metrics));
615
616 ret = smu_update_table(smu, SMU_TABLE_SMU_METRICS, 0, (void *)&metrics, false);
617 if (ret) 640 if (ret)
618 return ret; 641 return ret;
619 642
@@ -626,11 +649,26 @@ static int navi10_get_current_clk_freq_by_table(struct smu_context *smu,
626 return ret; 649 return ret;
627} 650}
628 651
652static bool navi10_is_support_fine_grained_dpm(struct smu_context *smu, enum smu_clk_type clk_type)
653{
654 PPTable_t *pptable = smu->smu_table.driver_pptable;
655 DpmDescriptor_t *dpm_desc = NULL;
656 uint32_t clk_index = 0;
657
658 clk_index = smu_clk_get_index(smu, clk_type);
659 dpm_desc = &pptable->DpmDescriptor[clk_index];
660
661 /* 0 - Fine grained DPM, 1 - Discrete DPM */
662 return dpm_desc->SnapToDiscrete == 0 ? true : false;
663}
664
629static int navi10_print_clk_levels(struct smu_context *smu, 665static int navi10_print_clk_levels(struct smu_context *smu,
630 enum smu_clk_type clk_type, char *buf) 666 enum smu_clk_type clk_type, char *buf)
631{ 667{
632 int i, size = 0, ret = 0; 668 int i, size = 0, ret = 0;
633 uint32_t cur_value = 0, value = 0, count = 0; 669 uint32_t cur_value = 0, value = 0, count = 0;
670 uint32_t freq_values[3] = {0};
671 uint32_t mark_index = 0;
634 672
635 switch (clk_type) { 673 switch (clk_type) {
636 case SMU_GFXCLK: 674 case SMU_GFXCLK:
@@ -643,22 +681,42 @@ static int navi10_print_clk_levels(struct smu_context *smu,
643 ret = smu_get_current_clk_freq(smu, clk_type, &cur_value); 681 ret = smu_get_current_clk_freq(smu, clk_type, &cur_value);
644 if (ret) 682 if (ret)
645 return size; 683 return size;
684
646 /* 10KHz -> MHz */ 685 /* 10KHz -> MHz */
647 cur_value = cur_value / 100; 686 cur_value = cur_value / 100;
648 687
649 size += sprintf(buf, "current clk: %uMhz\n", cur_value);
650
651 ret = smu_get_dpm_level_count(smu, clk_type, &count); 688 ret = smu_get_dpm_level_count(smu, clk_type, &count);
652 if (ret) 689 if (ret)
653 return size; 690 return size;
654 691
655 for (i = 0; i < count; i++) { 692 if (!navi10_is_support_fine_grained_dpm(smu, clk_type)) {
656 ret = smu_get_dpm_freq_by_index(smu, clk_type, i, &value); 693 for (i = 0; i < count; i++) {
694 ret = smu_get_dpm_freq_by_index(smu, clk_type, i, &value);
695 if (ret)
696 return size;
697
698 size += sprintf(buf + size, "%d: %uMhz %s\n", i, value,
699 cur_value == value ? "*" : "");
700 }
701 } else {
702 ret = smu_get_dpm_freq_by_index(smu, clk_type, 0, &freq_values[0]);
657 if (ret) 703 if (ret)
658 return size; 704 return size;
705 ret = smu_get_dpm_freq_by_index(smu, clk_type, count - 1, &freq_values[2]);
706 if (ret)
707 return size;
708
709 freq_values[1] = cur_value;
710 mark_index = cur_value == freq_values[0] ? 0 :
711 cur_value == freq_values[2] ? 2 : 1;
712 if (mark_index != 1)
713 freq_values[1] = (freq_values[0] + freq_values[2]) / 2;
714
715 for (i = 0; i < 3; i++) {
716 size += sprintf(buf + size, "%d: %uMhz %s\n", i, freq_values[i],
717 i == mark_index ? "*" : "");
718 }
659 719
660 size += sprintf(buf + size, "%d: %uMhz %s\n", i, value,
661 cur_value == value ? "*" : "");
662 } 720 }
663 break; 721 break;
664 default: 722 default:
@@ -866,8 +924,9 @@ static int navi10_get_gpu_power(struct smu_context *smu, uint32_t *value)
866 if (!value) 924 if (!value)
867 return -EINVAL; 925 return -EINVAL;
868 926
869 ret = smu_update_table(smu, SMU_TABLE_SMU_METRICS, 0, (void *)&metrics, 927 ret = navi10_get_metrics_table(smu, &metrics);
870 false); 928 if (ret)
929 return ret;
871 if (ret) 930 if (ret)
872 return ret; 931 return ret;
873 932
@@ -886,10 +945,7 @@ static int navi10_get_current_activity_percent(struct smu_context *smu,
886 if (!value) 945 if (!value)
887 return -EINVAL; 946 return -EINVAL;
888 947
889 msleep(1); 948 ret = navi10_get_metrics_table(smu, &metrics);
890
891 ret = smu_update_table(smu, SMU_TABLE_SMU_METRICS, 0,
892 (void *)&metrics, false);
893 if (ret) 949 if (ret)
894 return ret; 950 return ret;
895 951
@@ -919,22 +975,22 @@ static bool navi10_is_dpm_running(struct smu_context *smu)
919 return !!(feature_enabled & SMC_DPM_FEATURE); 975 return !!(feature_enabled & SMC_DPM_FEATURE);
920} 976}
921 977
922static int navi10_get_fan_speed(struct smu_context *smu, uint16_t *value) 978static int navi10_get_fan_speed_rpm(struct smu_context *smu,
979 uint32_t *speed)
923{ 980{
924 SmuMetrics_t metrics; 981 SmuMetrics_t metrics;
925 int ret = 0; 982 int ret = 0;
926 983
927 if (!value) 984 if (!speed)
928 return -EINVAL; 985 return -EINVAL;
929 986
930 memset(&metrics, 0, sizeof(metrics)); 987 ret = navi10_get_metrics_table(smu, &metrics);
931 988 if (ret)
932 ret = smu_update_table(smu, SMU_TABLE_SMU_METRICS, 0, 989 return ret;
933 (void *)&metrics, false);
934 if (ret) 990 if (ret)
935 return ret; 991 return ret;
936 992
937 *value = metrics.CurrFanSpeed; 993 *speed = metrics.CurrFanSpeed;
938 994
939 return ret; 995 return ret;
940} 996}
@@ -944,10 +1000,10 @@ static int navi10_get_fan_speed_percent(struct smu_context *smu,
944{ 1000{
945 int ret = 0; 1001 int ret = 0;
946 uint32_t percent = 0; 1002 uint32_t percent = 0;
947 uint16_t current_rpm; 1003 uint32_t current_rpm;
948 PPTable_t *pptable = smu->smu_table.driver_pptable; 1004 PPTable_t *pptable = smu->smu_table.driver_pptable;
949 1005
950 ret = navi10_get_fan_speed(smu, &current_rpm); 1006 ret = navi10_get_fan_speed_rpm(smu, &current_rpm);
951 if (ret) 1007 if (ret)
952 return ret; 1008 return ret;
953 1009
@@ -1278,7 +1334,7 @@ static int navi10_thermal_get_temperature(struct smu_context *smu,
1278 if (!value) 1334 if (!value)
1279 return -EINVAL; 1335 return -EINVAL;
1280 1336
1281 ret = smu_update_table(smu, SMU_TABLE_SMU_METRICS, 0, (void *)&metrics, false); 1337 ret = navi10_get_metrics_table(smu, &metrics);
1282 if (ret) 1338 if (ret)
1283 return ret; 1339 return ret;
1284 1340
@@ -1530,6 +1586,76 @@ static int navi10_set_ppfeature_status(struct smu_context *smu,
1530 return 0; 1586 return 0;
1531} 1587}
1532 1588
1589static int navi10_set_peak_clock_by_device(struct smu_context *smu)
1590{
1591 struct amdgpu_device *adev = smu->adev;
1592 int ret = 0;
1593 uint32_t sclk_freq = 0, uclk_freq = 0;
1594 uint32_t uclk_level = 0;
1595
1596 switch (adev->pdev->revision) {
1597 case 0xf0: /* XTX */
1598 case 0xc0:
1599 sclk_freq = NAVI10_PEAK_SCLK_XTX;
1600 break;
1601 case 0xf1: /* XT */
1602 case 0xc1:
1603 sclk_freq = NAVI10_PEAK_SCLK_XT;
1604 break;
1605 default: /* XL */
1606 sclk_freq = NAVI10_PEAK_SCLK_XL;
1607 break;
1608 }
1609
1610 ret = smu_get_dpm_level_count(smu, SMU_UCLK, &uclk_level);
1611 if (ret)
1612 return ret;
1613 ret = smu_get_dpm_freq_by_index(smu, SMU_UCLK, uclk_level - 1, &uclk_freq);
1614 if (ret)
1615 return ret;
1616
1617 ret = smu_set_soft_freq_range(smu, SMU_SCLK, sclk_freq, sclk_freq);
1618 if (ret)
1619 return ret;
1620 ret = smu_set_soft_freq_range(smu, SMU_UCLK, uclk_freq, uclk_freq);
1621 if (ret)
1622 return ret;
1623
1624 return ret;
1625}
1626
1627static int navi10_set_performance_level(struct smu_context *smu, enum amd_dpm_forced_level level)
1628{
1629 int ret = 0;
1630
1631 switch (level) {
1632 case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK:
1633 ret = navi10_set_peak_clock_by_device(smu);
1634 break;
1635 default:
1636 ret = -EINVAL;
1637 break;
1638 }
1639
1640 return ret;
1641}
1642
1643static int navi10_get_thermal_temperature_range(struct smu_context *smu,
1644 struct smu_temperature_range *range)
1645{
1646 struct smu_table_context *table_context = &smu->smu_table;
1647 struct smu_11_0_powerplay_table *powerplay_table = table_context->power_play_table;
1648
1649 if (!range || !powerplay_table)
1650 return -EINVAL;
1651
1652 /* The unit is temperature */
1653 range->min = 0;
1654 range->max = powerplay_table->software_shutdown_temp;
1655
1656 return 0;
1657}
1658
1533static const struct pptable_funcs navi10_ppt_funcs = { 1659static const struct pptable_funcs navi10_ppt_funcs = {
1534 .tables_init = navi10_tables_init, 1660 .tables_init = navi10_tables_init,
1535 .alloc_dpm_context = navi10_allocate_dpm_context, 1661 .alloc_dpm_context = navi10_allocate_dpm_context,
@@ -1557,6 +1683,7 @@ static const struct pptable_funcs navi10_ppt_funcs = {
1557 .unforce_dpm_levels = navi10_unforce_dpm_levels, 1683 .unforce_dpm_levels = navi10_unforce_dpm_levels,
1558 .is_dpm_running = navi10_is_dpm_running, 1684 .is_dpm_running = navi10_is_dpm_running,
1559 .get_fan_speed_percent = navi10_get_fan_speed_percent, 1685 .get_fan_speed_percent = navi10_get_fan_speed_percent,
1686 .get_fan_speed_rpm = navi10_get_fan_speed_rpm,
1560 .get_power_profile_mode = navi10_get_power_profile_mode, 1687 .get_power_profile_mode = navi10_get_power_profile_mode,
1561 .set_power_profile_mode = navi10_set_power_profile_mode, 1688 .set_power_profile_mode = navi10_set_power_profile_mode,
1562 .get_profiling_clk_mask = navi10_get_profiling_clk_mask, 1689 .get_profiling_clk_mask = navi10_get_profiling_clk_mask,
@@ -1565,6 +1692,8 @@ static const struct pptable_funcs navi10_ppt_funcs = {
1565 .get_uclk_dpm_states = navi10_get_uclk_dpm_states, 1692 .get_uclk_dpm_states = navi10_get_uclk_dpm_states,
1566 .get_ppfeature_status = navi10_get_ppfeature_status, 1693 .get_ppfeature_status = navi10_get_ppfeature_status,
1567 .set_ppfeature_status = navi10_set_ppfeature_status, 1694 .set_ppfeature_status = navi10_set_ppfeature_status,
1695 .set_performance_level = navi10_set_performance_level,
1696 .get_thermal_temperature_range = navi10_get_thermal_temperature_range,
1568}; 1697};
1569 1698
1570void navi10_set_ppt_funcs(struct smu_context *smu) 1699void navi10_set_ppt_funcs(struct smu_context *smu)
diff --git a/drivers/gpu/drm/amd/powerplay/navi10_ppt.h b/drivers/gpu/drm/amd/powerplay/navi10_ppt.h
index 957288e22f47..620ff17c2fef 100644
--- a/drivers/gpu/drm/amd/powerplay/navi10_ppt.h
+++ b/drivers/gpu/drm/amd/powerplay/navi10_ppt.h
@@ -23,6 +23,10 @@
23#ifndef __NAVI10_PPT_H__ 23#ifndef __NAVI10_PPT_H__
24#define __NAVI10_PPT_H__ 24#define __NAVI10_PPT_H__
25 25
26#define NAVI10_PEAK_SCLK_XTX (1830)
27#define NAVI10_PEAK_SCLK_XT (1755)
28#define NAVI10_PEAK_SCLK_XL (1625)
29
26extern void navi10_set_ppt_funcs(struct smu_context *smu); 30extern void navi10_set_ppt_funcs(struct smu_context *smu);
27 31
28#endif 32#endif
diff --git a/drivers/gpu/drm/amd/powerplay/smu_v11_0.c b/drivers/gpu/drm/amd/powerplay/smu_v11_0.c
index 95c7c4dae523..53097961bf2b 100644
--- a/drivers/gpu/drm/amd/powerplay/smu_v11_0.c
+++ b/drivers/gpu/drm/amd/powerplay/smu_v11_0.c
@@ -326,7 +326,8 @@ static int smu_v11_0_setup_pptable(struct smu_context *smu)
326 struct amdgpu_device *adev = smu->adev; 326 struct amdgpu_device *adev = smu->adev;
327 const struct smc_firmware_header_v1_0 *hdr; 327 const struct smc_firmware_header_v1_0 *hdr;
328 int ret, index; 328 int ret, index;
329 uint32_t size; 329 uint32_t size = 0;
330 uint16_t atom_table_size;
330 uint8_t frev, crev; 331 uint8_t frev, crev;
331 void *table; 332 void *table;
332 uint16_t version_major, version_minor; 333 uint16_t version_major, version_minor;
@@ -354,10 +355,11 @@ static int smu_v11_0_setup_pptable(struct smu_context *smu)
354 index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1, 355 index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
355 powerplayinfo); 356 powerplayinfo);
356 357
357 ret = smu_get_atom_data_table(smu, index, (uint16_t *)&size, &frev, &crev, 358 ret = smu_get_atom_data_table(smu, index, &atom_table_size, &frev, &crev,
358 (uint8_t **)&table); 359 (uint8_t **)&table);
359 if (ret) 360 if (ret)
360 return ret; 361 return ret;
362 size = atom_table_size;
361 } 363 }
362 364
363 if (!smu->smu_table.power_play_table) 365 if (!smu->smu_table.power_play_table)
@@ -1124,10 +1126,8 @@ static int smu_v11_0_set_thermal_range(struct smu_context *smu,
1124 struct smu_temperature_range *range) 1126 struct smu_temperature_range *range)
1125{ 1127{
1126 struct amdgpu_device *adev = smu->adev; 1128 struct amdgpu_device *adev = smu->adev;
1127 int low = SMU_THERMAL_MINIMUM_ALERT_TEMP * 1129 int low = SMU_THERMAL_MINIMUM_ALERT_TEMP;
1128 SMU_TEMPERATURE_UNITS_PER_CENTIGRADES; 1130 int high = SMU_THERMAL_MAXIMUM_ALERT_TEMP;
1129 int high = SMU_THERMAL_MAXIMUM_ALERT_TEMP *
1130 SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
1131 uint32_t val; 1131 uint32_t val;
1132 1132
1133 if (!range) 1133 if (!range)
@@ -1138,6 +1138,9 @@ static int smu_v11_0_set_thermal_range(struct smu_context *smu,
1138 if (high > range->max) 1138 if (high > range->max)
1139 high = range->max; 1139 high = range->max;
1140 1140
1141 low = max(SMU_THERMAL_MINIMUM_ALERT_TEMP, range->min);
1142 high = min(SMU_THERMAL_MAXIMUM_ALERT_TEMP, range->max);
1143
1141 if (low > high) 1144 if (low > high)
1142 return -EINVAL; 1145 return -EINVAL;
1143 1146
@@ -1146,8 +1149,8 @@ static int smu_v11_0_set_thermal_range(struct smu_context *smu,
1146 val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, THERM_IH_HW_ENA, 1); 1149 val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, THERM_IH_HW_ENA, 1);
1147 val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, THERM_INTH_MASK, 0); 1150 val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, THERM_INTH_MASK, 0);
1148 val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, THERM_INTL_MASK, 0); 1151 val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, THERM_INTL_MASK, 0);
1149 val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, DIG_THERM_INTH, (high / SMU_TEMPERATURE_UNITS_PER_CENTIGRADES)); 1152 val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, DIG_THERM_INTH, (high & 0xff));
1150 val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, DIG_THERM_INTL, (low / SMU_TEMPERATURE_UNITS_PER_CENTIGRADES)); 1153 val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, DIG_THERM_INTL, (low & 0xff));
1151 val = val & (~THM_THERMAL_INT_CTRL__THERM_TRIGGER_MASK_MASK); 1154 val = val & (~THM_THERMAL_INT_CTRL__THERM_TRIGGER_MASK_MASK);
1152 1155
1153 WREG32_SOC15(THM, 0, mmTHM_THERMAL_INT_CTRL, val); 1156 WREG32_SOC15(THM, 0, mmTHM_THERMAL_INT_CTRL, val);
@@ -1186,7 +1189,10 @@ static int smu_v11_0_start_thermal_control(struct smu_context *smu)
1186 1189
1187 if (!smu->pm_enabled) 1190 if (!smu->pm_enabled)
1188 return ret; 1191 return ret;
1192
1189 ret = smu_get_thermal_temperature_range(smu, &range); 1193 ret = smu_get_thermal_temperature_range(smu, &range);
1194 if (ret)
1195 return ret;
1190 1196
1191 if (smu->smu_table.thermal_controller_type) { 1197 if (smu->smu_table.thermal_controller_type) {
1192 ret = smu_v11_0_set_thermal_range(smu, &range); 1198 ret = smu_v11_0_set_thermal_range(smu, &range);
@@ -1202,15 +1208,17 @@ static int smu_v11_0_start_thermal_control(struct smu_context *smu)
1202 return ret; 1208 return ret;
1203 } 1209 }
1204 1210
1205 adev->pm.dpm.thermal.min_temp = range.min; 1211 adev->pm.dpm.thermal.min_temp = range.min * SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
1206 adev->pm.dpm.thermal.max_temp = range.max; 1212 adev->pm.dpm.thermal.max_temp = range.max * SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
1207 adev->pm.dpm.thermal.max_edge_emergency_temp = range.edge_emergency_max; 1213 adev->pm.dpm.thermal.max_edge_emergency_temp = range.edge_emergency_max * SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
1208 adev->pm.dpm.thermal.min_hotspot_temp = range.hotspot_min; 1214 adev->pm.dpm.thermal.min_hotspot_temp = range.hotspot_min * SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
1209 adev->pm.dpm.thermal.max_hotspot_crit_temp = range.hotspot_crit_max; 1215 adev->pm.dpm.thermal.max_hotspot_crit_temp = range.hotspot_crit_max * SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
1210 adev->pm.dpm.thermal.max_hotspot_emergency_temp = range.hotspot_emergency_max; 1216 adev->pm.dpm.thermal.max_hotspot_emergency_temp = range.hotspot_emergency_max * SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
1211 adev->pm.dpm.thermal.min_mem_temp = range.mem_min; 1217 adev->pm.dpm.thermal.min_mem_temp = range.mem_min * SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
1212 adev->pm.dpm.thermal.max_mem_crit_temp = range.mem_crit_max; 1218 adev->pm.dpm.thermal.max_mem_crit_temp = range.mem_crit_max * SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
1213 adev->pm.dpm.thermal.max_mem_emergency_temp = range.mem_emergency_max; 1219 adev->pm.dpm.thermal.max_mem_emergency_temp = range.mem_emergency_max * SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
1220 adev->pm.dpm.thermal.min_temp = range.min * SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
1221 adev->pm.dpm.thermal.max_temp = range.max * SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
1214 1222
1215 return ret; 1223 return ret;
1216} 1224}
@@ -1371,23 +1379,6 @@ static int smu_v11_0_gfx_off_control(struct smu_context *smu, bool enable)
1371 return ret; 1379 return ret;
1372} 1380}
1373 1381
1374static int smu_v11_0_get_current_rpm(struct smu_context *smu,
1375 uint32_t *current_rpm)
1376{
1377 int ret;
1378
1379 ret = smu_send_smc_msg(smu, SMU_MSG_GetCurrentRpm);
1380
1381 if (ret) {
1382 pr_err("Attempt to get current RPM from SMC Failed!\n");
1383 return ret;
1384 }
1385
1386 smu_read_smc_arg(smu, current_rpm);
1387
1388 return 0;
1389}
1390
1391static uint32_t 1382static uint32_t
1392smu_v11_0_get_fan_control_mode(struct smu_context *smu) 1383smu_v11_0_get_fan_control_mode(struct smu_context *smu)
1393{ 1384{
@@ -1402,7 +1393,7 @@ smu_v11_0_smc_fan_control(struct smu_context *smu, bool start)
1402{ 1393{
1403 int ret = 0; 1394 int ret = 0;
1404 1395
1405 if (smu_feature_is_supported(smu, SMU_FEATURE_FAN_CONTROL_BIT)) 1396 if (!smu_feature_is_supported(smu, SMU_FEATURE_FAN_CONTROL_BIT))
1406 return 0; 1397 return 0;
1407 1398
1408 ret = smu_feature_set_enabled(smu, SMU_FEATURE_FAN_CONTROL_BIT, start); 1399 ret = smu_feature_set_enabled(smu, SMU_FEATURE_FAN_CONTROL_BIT, start);
@@ -1773,7 +1764,6 @@ static const struct smu_funcs smu_v11_0_funcs = {
1773 .set_deep_sleep_dcefclk = smu_v11_0_set_deep_sleep_dcefclk, 1764 .set_deep_sleep_dcefclk = smu_v11_0_set_deep_sleep_dcefclk,
1774 .display_clock_voltage_request = smu_v11_0_display_clock_voltage_request, 1765 .display_clock_voltage_request = smu_v11_0_display_clock_voltage_request,
1775 .set_watermarks_for_clock_ranges = smu_v11_0_set_watermarks_for_clock_ranges, 1766 .set_watermarks_for_clock_ranges = smu_v11_0_set_watermarks_for_clock_ranges,
1776 .get_current_rpm = smu_v11_0_get_current_rpm,
1777 .get_fan_control_mode = smu_v11_0_get_fan_control_mode, 1767 .get_fan_control_mode = smu_v11_0_get_fan_control_mode,
1778 .set_fan_control_mode = smu_v11_0_set_fan_control_mode, 1768 .set_fan_control_mode = smu_v11_0_set_fan_control_mode,
1779 .set_fan_speed_percent = smu_v11_0_set_fan_speed_percent, 1769 .set_fan_speed_percent = smu_v11_0_set_fan_speed_percent,
diff --git a/drivers/gpu/drm/amd/powerplay/vega20_ppt.c b/drivers/gpu/drm/amd/powerplay/vega20_ppt.c
index bb9bb09cfc7a..6a14497257e4 100644
--- a/drivers/gpu/drm/amd/powerplay/vega20_ppt.c
+++ b/drivers/gpu/drm/amd/powerplay/vega20_ppt.c
@@ -450,7 +450,6 @@ static int vega20_store_powerplay_table(struct smu_context *smu)
450 memcpy(table_context->driver_pptable, &powerplay_table->smcPPTable, 450 memcpy(table_context->driver_pptable, &powerplay_table->smcPPTable,
451 sizeof(PPTable_t)); 451 sizeof(PPTable_t));
452 452
453 table_context->software_shutdown_temp = powerplay_table->usSoftwareShutdownTemp;
454 table_context->thermal_controller_type = powerplay_table->ucThermalControllerType; 453 table_context->thermal_controller_type = powerplay_table->ucThermalControllerType;
455 table_context->TDPODLimit = le32_to_cpu(powerplay_table->OverDrive8Table.ODSettingsMax[ATOM_VEGA20_ODSETTING_POWERPERCENTAGE]); 454 table_context->TDPODLimit = le32_to_cpu(powerplay_table->OverDrive8Table.ODSettingsMax[ATOM_VEGA20_ODSETTING_POWERPERCENTAGE]);
456 455
@@ -3015,6 +3014,23 @@ static int vega20_set_thermal_fan_table(struct smu_context *smu)
3015 return ret; 3014 return ret;
3016} 3015}
3017 3016
3017static int vega20_get_fan_speed_rpm(struct smu_context *smu,
3018 uint32_t *speed)
3019{
3020 int ret;
3021
3022 ret = smu_send_smc_msg(smu, SMU_MSG_GetCurrentRpm);
3023
3024 if (ret) {
3025 pr_err("Attempt to get current RPM from SMC Failed!\n");
3026 return ret;
3027 }
3028
3029 smu_read_smc_arg(smu, speed);
3030
3031 return 0;
3032}
3033
3018static int vega20_get_fan_speed_percent(struct smu_context *smu, 3034static int vega20_get_fan_speed_percent(struct smu_context *smu,
3019 uint32_t *speed) 3035 uint32_t *speed)
3020{ 3036{
@@ -3022,7 +3038,7 @@ static int vega20_get_fan_speed_percent(struct smu_context *smu,
3022 uint32_t current_rpm = 0, percent = 0; 3038 uint32_t current_rpm = 0, percent = 0;
3023 PPTable_t *pptable = smu->smu_table.driver_pptable; 3039 PPTable_t *pptable = smu->smu_table.driver_pptable;
3024 3040
3025 ret = smu_get_current_rpm(smu, &current_rpm); 3041 ret = vega20_get_fan_speed_rpm(smu, &current_rpm);
3026 if (ret) 3042 if (ret)
3027 return ret; 3043 return ret;
3028 3044
@@ -3034,6 +3050,7 @@ static int vega20_get_fan_speed_percent(struct smu_context *smu,
3034 3050
3035static int vega20_get_gpu_power(struct smu_context *smu, uint32_t *value) 3051static int vega20_get_gpu_power(struct smu_context *smu, uint32_t *value)
3036{ 3052{
3053 uint32_t smu_version;
3037 int ret = 0; 3054 int ret = 0;
3038 SmuMetrics_t metrics; 3055 SmuMetrics_t metrics;
3039 3056
@@ -3044,7 +3061,15 @@ static int vega20_get_gpu_power(struct smu_context *smu, uint32_t *value)
3044 if (ret) 3061 if (ret)
3045 return ret; 3062 return ret;
3046 3063
3047 *value = metrics.CurrSocketPower << 8; 3064 ret = smu_get_smc_version(smu, NULL, &smu_version);
3065 if (ret)
3066 return ret;
3067
3068 /* For the 40.46 release, they changed the value name */
3069 if (smu_version == 0x282e00)
3070 *value = metrics.AverageSocketPower << 8;
3071 else
3072 *value = metrics.CurrSocketPower << 8;
3048 3073
3049 return 0; 3074 return 0;
3050} 3075}
@@ -3217,35 +3242,24 @@ static int vega20_set_watermarks_table(struct smu_context *smu,
3217 return 0; 3242 return 0;
3218} 3243}
3219 3244
3220static const struct smu_temperature_range vega20_thermal_policy[] =
3221{
3222 {-273150, 99000, 99000, -273150, 99000, 99000, -273150, 99000, 99000},
3223 { 120000, 120000, 120000, 120000, 120000, 120000, 120000, 120000, 120000},
3224};
3225
3226static int vega20_get_thermal_temperature_range(struct smu_context *smu, 3245static int vega20_get_thermal_temperature_range(struct smu_context *smu,
3227 struct smu_temperature_range *range) 3246 struct smu_temperature_range *range)
3228{ 3247{
3229 3248 struct smu_table_context *table_context = &smu->smu_table;
3249 ATOM_Vega20_POWERPLAYTABLE *powerplay_table = table_context->power_play_table;
3230 PPTable_t *pptable = smu->smu_table.driver_pptable; 3250 PPTable_t *pptable = smu->smu_table.driver_pptable;
3231 3251
3232 if (!range) 3252 if (!range || !powerplay_table)
3233 return -EINVAL; 3253 return -EINVAL;
3234 3254
3235 memcpy(range, &vega20_thermal_policy[0], sizeof(struct smu_temperature_range)); 3255 /* The unit is temperature */
3236 3256 range->min = 0;
3237 range->max = pptable->TedgeLimit * 3257 range->max = powerplay_table->usSoftwareShutdownTemp;
3238 SMU_TEMPERATURE_UNITS_PER_CENTIGRADES; 3258 range->edge_emergency_max = (pptable->TedgeLimit + CTF_OFFSET_EDGE);
3239 range->edge_emergency_max = (pptable->TedgeLimit + CTF_OFFSET_EDGE) * 3259 range->hotspot_crit_max = pptable->ThotspotLimit;
3240 SMU_TEMPERATURE_UNITS_PER_CENTIGRADES; 3260 range->hotspot_emergency_max = (pptable->ThotspotLimit + CTF_OFFSET_HOTSPOT);
3241 range->hotspot_crit_max = pptable->ThotspotLimit * 3261 range->mem_crit_max = pptable->ThbmLimit;
3242 SMU_TEMPERATURE_UNITS_PER_CENTIGRADES; 3262 range->mem_emergency_max = (pptable->ThbmLimit + CTF_OFFSET_HBM);
3243 range->hotspot_emergency_max = (pptable->ThotspotLimit + CTF_OFFSET_HOTSPOT) *
3244 SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
3245 range->mem_crit_max = pptable->ThbmLimit *
3246 SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
3247 range->mem_emergency_max = (pptable->ThbmLimit + CTF_OFFSET_HBM)*
3248 SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
3249 3263
3250 3264
3251 return 0; 3265 return 0;
@@ -3293,6 +3307,7 @@ static const struct pptable_funcs vega20_ppt_funcs = {
3293 .is_dpm_running = vega20_is_dpm_running, 3307 .is_dpm_running = vega20_is_dpm_running,
3294 .set_thermal_fan_table = vega20_set_thermal_fan_table, 3308 .set_thermal_fan_table = vega20_set_thermal_fan_table,
3295 .get_fan_speed_percent = vega20_get_fan_speed_percent, 3309 .get_fan_speed_percent = vega20_get_fan_speed_percent,
3310 .get_fan_speed_rpm = vega20_get_fan_speed_rpm,
3296 .set_watermarks_table = vega20_set_watermarks_table, 3311 .set_watermarks_table = vega20_set_watermarks_table,
3297 .get_thermal_temperature_range = vega20_get_thermal_temperature_range 3312 .get_thermal_temperature_range = vega20_get_thermal_temperature_range
3298}; 3313};
diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_dev.c b/drivers/gpu/drm/arm/display/komeda/komeda_dev.c
index 5a118984de33..9d4d5075cc64 100644
--- a/drivers/gpu/drm/arm/display/komeda/komeda_dev.c
+++ b/drivers/gpu/drm/arm/display/komeda/komeda_dev.c
@@ -8,6 +8,7 @@
8#include <linux/iommu.h> 8#include <linux/iommu.h>
9#include <linux/of_device.h> 9#include <linux/of_device.h>
10#include <linux/of_graph.h> 10#include <linux/of_graph.h>
11#include <linux/of_reserved_mem.h>
11#include <linux/platform_device.h> 12#include <linux/platform_device.h>
12#include <linux/dma-mapping.h> 13#include <linux/dma-mapping.h>
13#ifdef CONFIG_DEBUG_FS 14#ifdef CONFIG_DEBUG_FS
@@ -126,7 +127,7 @@ static int komeda_parse_pipe_dt(struct komeda_dev *mdev, struct device_node *np)
126 pipe->of_output_port = 127 pipe->of_output_port =
127 of_graph_get_port_by_id(np, KOMEDA_OF_PORT_OUTPUT); 128 of_graph_get_port_by_id(np, KOMEDA_OF_PORT_OUTPUT);
128 129
129 pipe->of_node = np; 130 pipe->of_node = of_node_get(np);
130 131
131 return 0; 132 return 0;
132} 133}
@@ -143,6 +144,12 @@ static int komeda_parse_dt(struct device *dev, struct komeda_dev *mdev)
143 return mdev->irq; 144 return mdev->irq;
144 } 145 }
145 146
147 /* Get the optional framebuffer memory resource */
148 ret = of_reserved_mem_device_init(dev);
149 if (ret && ret != -ENODEV)
150 return ret;
151 ret = 0;
152
146 for_each_available_child_of_node(np, child) { 153 for_each_available_child_of_node(np, child) {
147 if (of_node_cmp(child->name, "pipeline") == 0) { 154 if (of_node_cmp(child->name, "pipeline") == 0) {
148 ret = komeda_parse_pipe_dt(mdev, child); 155 ret = komeda_parse_pipe_dt(mdev, child);
@@ -289,6 +296,8 @@ void komeda_dev_destroy(struct komeda_dev *mdev)
289 296
290 mdev->n_pipelines = 0; 297 mdev->n_pipelines = 0;
291 298
299 of_reserved_mem_device_release(dev);
300
292 if (funcs && funcs->cleanup) 301 if (funcs && funcs->cleanup)
293 funcs->cleanup(mdev); 302 funcs->cleanup(mdev);
294 303
diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_format_caps.c b/drivers/gpu/drm/arm/display/komeda/komeda_format_caps.c
index cd4d9f53ddef..c9a1edb9a000 100644
--- a/drivers/gpu/drm/arm/display/komeda/komeda_format_caps.c
+++ b/drivers/gpu/drm/arm/display/komeda/komeda_format_caps.c
@@ -35,6 +35,25 @@ komeda_get_format_caps(struct komeda_format_caps_table *table,
35 return NULL; 35 return NULL;
36} 36}
37 37
38u32 komeda_get_afbc_format_bpp(const struct drm_format_info *info, u64 modifier)
39{
40 u32 bpp;
41
42 switch (info->format) {
43 case DRM_FORMAT_YUV420_8BIT:
44 bpp = 12;
45 break;
46 case DRM_FORMAT_YUV420_10BIT:
47 bpp = 15;
48 break;
49 default:
50 bpp = info->cpp[0] * 8;
51 break;
52 }
53
54 return bpp;
55}
56
38/* Two assumptions 57/* Two assumptions
39 * 1. RGB always has YTR 58 * 1. RGB always has YTR
40 * 2. Tiled RGB always has SC 59 * 2. Tiled RGB always has SC
diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_format_caps.h b/drivers/gpu/drm/arm/display/komeda/komeda_format_caps.h
index 3631910d33b5..32273cf18f7c 100644
--- a/drivers/gpu/drm/arm/display/komeda/komeda_format_caps.h
+++ b/drivers/gpu/drm/arm/display/komeda/komeda_format_caps.h
@@ -97,6 +97,9 @@ const struct komeda_format_caps *
97komeda_get_format_caps(struct komeda_format_caps_table *table, 97komeda_get_format_caps(struct komeda_format_caps_table *table,
98 u32 fourcc, u64 modifier); 98 u32 fourcc, u64 modifier);
99 99
100u32 komeda_get_afbc_format_bpp(const struct drm_format_info *info,
101 u64 modifier);
102
100u32 *komeda_get_layer_fourcc_list(struct komeda_format_caps_table *table, 103u32 *komeda_get_layer_fourcc_list(struct komeda_format_caps_table *table,
101 u32 layer_type, u32 *n_fmts); 104 u32 layer_type, u32 *n_fmts);
102 105
diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_framebuffer.c b/drivers/gpu/drm/arm/display/komeda/komeda_framebuffer.c
index 3b0a70ed6aa0..1b01a625f40e 100644
--- a/drivers/gpu/drm/arm/display/komeda/komeda_framebuffer.c
+++ b/drivers/gpu/drm/arm/display/komeda/komeda_framebuffer.c
@@ -43,7 +43,7 @@ komeda_fb_afbc_size_check(struct komeda_fb *kfb, struct drm_file *file,
43 struct drm_framebuffer *fb = &kfb->base; 43 struct drm_framebuffer *fb = &kfb->base;
44 const struct drm_format_info *info = fb->format; 44 const struct drm_format_info *info = fb->format;
45 struct drm_gem_object *obj; 45 struct drm_gem_object *obj;
46 u32 alignment_w = 0, alignment_h = 0, alignment_header, n_blocks; 46 u32 alignment_w = 0, alignment_h = 0, alignment_header, n_blocks, bpp;
47 u64 min_size; 47 u64 min_size;
48 48
49 obj = drm_gem_object_lookup(file, mode_cmd->handles[0]); 49 obj = drm_gem_object_lookup(file, mode_cmd->handles[0]);
@@ -88,8 +88,9 @@ komeda_fb_afbc_size_check(struct komeda_fb *kfb, struct drm_file *file,
88 kfb->offset_payload = ALIGN(n_blocks * AFBC_HEADER_SIZE, 88 kfb->offset_payload = ALIGN(n_blocks * AFBC_HEADER_SIZE,
89 alignment_header); 89 alignment_header);
90 90
91 bpp = komeda_get_afbc_format_bpp(info, fb->modifier);
91 kfb->afbc_size = kfb->offset_payload + n_blocks * 92 kfb->afbc_size = kfb->offset_payload + n_blocks *
92 ALIGN(info->cpp[0] * AFBC_SUPERBLK_PIXELS, 93 ALIGN(bpp * AFBC_SUPERBLK_PIXELS / 8,
93 AFBC_SUPERBLK_ALIGNMENT); 94 AFBC_SUPERBLK_ALIGNMENT);
94 min_size = kfb->afbc_size + fb->offsets[0]; 95 min_size = kfb->afbc_size + fb->offsets[0];
95 if (min_size > obj->size) { 96 if (min_size > obj->size) {
diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_kms.c b/drivers/gpu/drm/arm/display/komeda/komeda_kms.c
index 419a8b0e5de8..69d9e26c60c8 100644
--- a/drivers/gpu/drm/arm/display/komeda/komeda_kms.c
+++ b/drivers/gpu/drm/arm/display/komeda/komeda_kms.c
@@ -14,6 +14,7 @@
14#include <drm/drm_gem_cma_helper.h> 14#include <drm/drm_gem_cma_helper.h>
15#include <drm/drm_gem_framebuffer_helper.h> 15#include <drm/drm_gem_framebuffer_helper.h>
16#include <drm/drm_irq.h> 16#include <drm/drm_irq.h>
17#include <drm/drm_probe_helper.h>
17#include <drm/drm_vblank.h> 18#include <drm/drm_vblank.h>
18 19
19#include "komeda_dev.h" 20#include "komeda_dev.h"
@@ -146,7 +147,6 @@ static int komeda_crtc_normalize_zpos(struct drm_crtc *crtc,
146 struct komeda_crtc_state *kcrtc_st = to_kcrtc_st(crtc_st); 147 struct komeda_crtc_state *kcrtc_st = to_kcrtc_st(crtc_st);
147 struct komeda_plane_state *kplane_st; 148 struct komeda_plane_state *kplane_st;
148 struct drm_plane_state *plane_st; 149 struct drm_plane_state *plane_st;
149 struct drm_framebuffer *fb;
150 struct drm_plane *plane; 150 struct drm_plane *plane;
151 struct list_head zorder_list; 151 struct list_head zorder_list;
152 int order = 0, err; 152 int order = 0, err;
@@ -172,7 +172,6 @@ static int komeda_crtc_normalize_zpos(struct drm_crtc *crtc,
172 172
173 list_for_each_entry(kplane_st, &zorder_list, zlist_node) { 173 list_for_each_entry(kplane_st, &zorder_list, zlist_node) {
174 plane_st = &kplane_st->base; 174 plane_st = &kplane_st->base;
175 fb = plane_st->fb;
176 plane = plane_st->plane; 175 plane = plane_st->plane;
177 176
178 plane_st->normalized_zpos = order++; 177 plane_st->normalized_zpos = order++;
@@ -205,7 +204,7 @@ static int komeda_kms_check(struct drm_device *dev,
205 struct drm_atomic_state *state) 204 struct drm_atomic_state *state)
206{ 205{
207 struct drm_crtc *crtc; 206 struct drm_crtc *crtc;
208 struct drm_crtc_state *old_crtc_st, *new_crtc_st; 207 struct drm_crtc_state *new_crtc_st;
209 int i, err; 208 int i, err;
210 209
211 err = drm_atomic_helper_check_modeset(dev, state); 210 err = drm_atomic_helper_check_modeset(dev, state);
@@ -216,7 +215,7 @@ static int komeda_kms_check(struct drm_device *dev,
216 * so need to add all affected_planes (even unchanged) to 215 * so need to add all affected_planes (even unchanged) to
217 * drm_atomic_state. 216 * drm_atomic_state.
218 */ 217 */
219 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_st, new_crtc_st, i) { 218 for_each_new_crtc_in_state(state, crtc, new_crtc_st, i) {
220 err = drm_atomic_add_affected_planes(state, crtc); 219 err = drm_atomic_add_affected_planes(state, crtc);
221 if (err) 220 if (err)
222 return err; 221 return err;
@@ -307,24 +306,33 @@ struct komeda_kms_dev *komeda_kms_attach(struct komeda_dev *mdev)
307 komeda_kms_irq_handler, IRQF_SHARED, 306 komeda_kms_irq_handler, IRQF_SHARED,
308 drm->driver->name, drm); 307 drm->driver->name, drm);
309 if (err) 308 if (err)
310 goto cleanup_mode_config; 309 goto free_component_binding;
311 310
312 err = mdev->funcs->enable_irq(mdev); 311 err = mdev->funcs->enable_irq(mdev);
313 if (err) 312 if (err)
314 goto cleanup_mode_config; 313 goto free_component_binding;
315 314
316 drm->irq_enabled = true; 315 drm->irq_enabled = true;
317 316
317 drm_kms_helper_poll_init(drm);
318
318 err = drm_dev_register(drm, 0); 319 err = drm_dev_register(drm, 0);
319 if (err) 320 if (err)
320 goto cleanup_mode_config; 321 goto free_interrupts;
321 322
322 return kms; 323 return kms;
323 324
324cleanup_mode_config: 325free_interrupts:
326 drm_kms_helper_poll_fini(drm);
325 drm->irq_enabled = false; 327 drm->irq_enabled = false;
328 mdev->funcs->disable_irq(mdev);
329free_component_binding:
330 component_unbind_all(mdev->dev, drm);
331cleanup_mode_config:
326 drm_mode_config_cleanup(drm); 332 drm_mode_config_cleanup(drm);
327 komeda_kms_cleanup_private_objs(kms); 333 komeda_kms_cleanup_private_objs(kms);
334 drm->dev_private = NULL;
335 drm_dev_put(drm);
328free_kms: 336free_kms:
329 kfree(kms); 337 kfree(kms);
330 return ERR_PTR(err); 338 return ERR_PTR(err);
@@ -335,12 +343,14 @@ void komeda_kms_detach(struct komeda_kms_dev *kms)
335 struct drm_device *drm = &kms->base; 343 struct drm_device *drm = &kms->base;
336 struct komeda_dev *mdev = drm->dev_private; 344 struct komeda_dev *mdev = drm->dev_private;
337 345
346 drm_dev_unregister(drm);
347 drm_kms_helper_poll_fini(drm);
348 drm_atomic_helper_shutdown(drm);
338 drm->irq_enabled = false; 349 drm->irq_enabled = false;
339 mdev->funcs->disable_irq(mdev); 350 mdev->funcs->disable_irq(mdev);
340 drm_dev_unregister(drm);
341 component_unbind_all(mdev->dev, drm); 351 component_unbind_all(mdev->dev, drm);
342 komeda_kms_cleanup_private_objs(kms);
343 drm_mode_config_cleanup(drm); 352 drm_mode_config_cleanup(drm);
353 komeda_kms_cleanup_private_objs(kms);
344 drm->dev_private = NULL; 354 drm->dev_private = NULL;
345 drm_dev_put(drm); 355 drm_dev_put(drm);
346} 356}
diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_pipeline.h b/drivers/gpu/drm/arm/display/komeda/komeda_pipeline.h
index a90bcbb3cb23..14b683164544 100644
--- a/drivers/gpu/drm/arm/display/komeda/komeda_pipeline.h
+++ b/drivers/gpu/drm/arm/display/komeda/komeda_pipeline.h
@@ -480,6 +480,7 @@ void komeda_pipeline_dump_register(struct komeda_pipeline *pipe,
480 struct seq_file *sf); 480 struct seq_file *sf);
481 481
482/* component APIs */ 482/* component APIs */
483extern __printf(10, 11)
483struct komeda_component * 484struct komeda_component *
484komeda_component_add(struct komeda_pipeline *pipe, 485komeda_component_add(struct komeda_pipeline *pipe,
485 size_t comp_sz, u32 id, u32 hw_id, 486 size_t comp_sz, u32 id, u32 hw_id,
diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_wb_connector.c b/drivers/gpu/drm/arm/display/komeda/komeda_wb_connector.c
index 617e1f7b8472..2851cac94d86 100644
--- a/drivers/gpu/drm/arm/display/komeda/komeda_wb_connector.c
+++ b/drivers/gpu/drm/arm/display/komeda/komeda_wb_connector.c
@@ -148,7 +148,7 @@ static int komeda_wb_connector_add(struct komeda_kms_dev *kms,
148 if (!kcrtc->master->wb_layer) 148 if (!kcrtc->master->wb_layer)
149 return 0; 149 return 0;
150 150
151 kwb_conn = kzalloc(sizeof(*wb_conn), GFP_KERNEL); 151 kwb_conn = kzalloc(sizeof(*kwb_conn), GFP_KERNEL);
152 if (!kwb_conn) 152 if (!kwb_conn)
153 return -ENOMEM; 153 return -ENOMEM;
154 154
diff --git a/drivers/gpu/drm/ast/ast_main.c b/drivers/gpu/drm/ast/ast_main.c
index 4c7e31cb45ff..a5d1494a3dc4 100644
--- a/drivers/gpu/drm/ast/ast_main.c
+++ b/drivers/gpu/drm/ast/ast_main.c
@@ -131,8 +131,8 @@ static int ast_detect_chip(struct drm_device *dev, bool *need_post)
131 131
132 132
133 /* Enable extended register access */ 133 /* Enable extended register access */
134 ast_enable_mmio(dev);
135 ast_open_key(ast); 134 ast_open_key(ast);
135 ast_enable_mmio(dev);
136 136
137 /* Find out whether P2A works or whether to use device-tree */ 137 /* Find out whether P2A works or whether to use device-tree */
138 ast_detect_config_mode(dev, &scu_rev); 138 ast_detect_config_mode(dev, &scu_rev);
@@ -576,6 +576,9 @@ void ast_driver_unload(struct drm_device *dev)
576{ 576{
577 struct ast_private *ast = dev->dev_private; 577 struct ast_private *ast = dev->dev_private;
578 578
579 /* enable standard VGA decode */
580 ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xa1, 0x04);
581
579 ast_release_firmware(dev); 582 ast_release_firmware(dev);
580 kfree(ast->dp501_fw_addr); 583 kfree(ast->dp501_fw_addr);
581 ast_mode_fini(dev); 584 ast_mode_fini(dev);
diff --git a/drivers/gpu/drm/ast/ast_mode.c b/drivers/gpu/drm/ast/ast_mode.c
index ffccbef962a4..a1cb020e07e5 100644
--- a/drivers/gpu/drm/ast/ast_mode.c
+++ b/drivers/gpu/drm/ast/ast_mode.c
@@ -604,7 +604,7 @@ static int ast_crtc_mode_set(struct drm_crtc *crtc,
604 return -EINVAL; 604 return -EINVAL;
605 ast_open_key(ast); 605 ast_open_key(ast);
606 606
607 ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xa1, 0xff, 0x04); 607 ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xa1, 0x06);
608 608
609 ast_set_std_reg(crtc, adjusted_mode, &vbios_mode); 609 ast_set_std_reg(crtc, adjusted_mode, &vbios_mode);
610 ast_set_crtc_reg(crtc, adjusted_mode, &vbios_mode); 610 ast_set_crtc_reg(crtc, adjusted_mode, &vbios_mode);
diff --git a/drivers/gpu/drm/ast/ast_post.c b/drivers/gpu/drm/ast/ast_post.c
index f7d421359d56..c1d1ac51d1c2 100644
--- a/drivers/gpu/drm/ast/ast_post.c
+++ b/drivers/gpu/drm/ast/ast_post.c
@@ -46,7 +46,7 @@ void ast_enable_mmio(struct drm_device *dev)
46{ 46{
47 struct ast_private *ast = dev->dev_private; 47 struct ast_private *ast = dev->dev_private;
48 48
49 ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xa1, 0xff, 0x04); 49 ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xa1, 0x06);
50} 50}
51 51
52 52
diff --git a/drivers/gpu/drm/bochs/bochs_kms.c b/drivers/gpu/drm/bochs/bochs_kms.c
index bc19dbd531ef..359030d5d818 100644
--- a/drivers/gpu/drm/bochs/bochs_kms.c
+++ b/drivers/gpu/drm/bochs/bochs_kms.c
@@ -191,6 +191,7 @@ int bochs_kms_init(struct bochs_device *bochs)
191 bochs->dev->mode_config.fb_base = bochs->fb_base; 191 bochs->dev->mode_config.fb_base = bochs->fb_base;
192 bochs->dev->mode_config.preferred_depth = 24; 192 bochs->dev->mode_config.preferred_depth = 24;
193 bochs->dev->mode_config.prefer_shadow = 0; 193 bochs->dev->mode_config.prefer_shadow = 0;
194 bochs->dev->mode_config.prefer_shadow_fbdev = 1;
194 bochs->dev->mode_config.quirk_addfb_prefer_host_byte_order = true; 195 bochs->dev->mode_config.quirk_addfb_prefer_host_byte_order = true;
195 196
196 bochs->dev->mode_config.funcs = &bochs_mode_funcs; 197 bochs->dev->mode_config.funcs = &bochs_mode_funcs;
diff --git a/drivers/gpu/drm/bridge/Kconfig b/drivers/gpu/drm/bridge/Kconfig
index ee777469293a..e4e22bbae2a7 100644
--- a/drivers/gpu/drm/bridge/Kconfig
+++ b/drivers/gpu/drm/bridge/Kconfig
@@ -48,6 +48,7 @@ config DRM_DUMB_VGA_DAC
48config DRM_LVDS_ENCODER 48config DRM_LVDS_ENCODER
49 tristate "Transparent parallel to LVDS encoder support" 49 tristate "Transparent parallel to LVDS encoder support"
50 depends on OF 50 depends on OF
51 select DRM_KMS_HELPER
51 select DRM_PANEL_BRIDGE 52 select DRM_PANEL_BRIDGE
52 help 53 help
53 Support for transparent parallel to LVDS encoders that don't require 54 Support for transparent parallel to LVDS encoders that don't require
@@ -116,9 +117,10 @@ config DRM_THINE_THC63LVD1024
116 117
117config DRM_TOSHIBA_TC358764 118config DRM_TOSHIBA_TC358764
118 tristate "TC358764 DSI/LVDS bridge" 119 tristate "TC358764 DSI/LVDS bridge"
119 depends on DRM && DRM_PANEL
120 depends on OF 120 depends on OF
121 select DRM_MIPI_DSI 121 select DRM_MIPI_DSI
122 select DRM_KMS_HELPER
123 select DRM_PANEL
122 help 124 help
123 Toshiba TC358764 DSI/LVDS bridge driver. 125 Toshiba TC358764 DSI/LVDS bridge driver.
124 126
diff --git a/drivers/gpu/drm/drm_client.c b/drivers/gpu/drm/drm_client.c
index 410572f14257..e1dafb0cc5e2 100644
--- a/drivers/gpu/drm/drm_client.c
+++ b/drivers/gpu/drm/drm_client.c
@@ -254,7 +254,6 @@ drm_client_buffer_create(struct drm_client_dev *client, u32 width, u32 height, u
254 struct drm_device *dev = client->dev; 254 struct drm_device *dev = client->dev;
255 struct drm_client_buffer *buffer; 255 struct drm_client_buffer *buffer;
256 struct drm_gem_object *obj; 256 struct drm_gem_object *obj;
257 void *vaddr;
258 int ret; 257 int ret;
259 258
260 buffer = kzalloc(sizeof(*buffer), GFP_KERNEL); 259 buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
@@ -281,6 +280,36 @@ drm_client_buffer_create(struct drm_client_dev *client, u32 width, u32 height, u
281 280
282 buffer->gem = obj; 281 buffer->gem = obj;
283 282
283 return buffer;
284
285err_delete:
286 drm_client_buffer_delete(buffer);
287
288 return ERR_PTR(ret);
289}
290
291/**
292 * drm_client_buffer_vmap - Map DRM client buffer into address space
293 * @buffer: DRM client buffer
294 *
295 * This function maps a client buffer into kernel address space. If the
296 * buffer is already mapped, it returns the mapping's address.
297 *
298 * Client buffer mappings are not ref'counted. Each call to
299 * drm_client_buffer_vmap() should be followed by a call to
300 * drm_client_buffer_vunmap(); or the client buffer should be mapped
301 * throughout its lifetime.
302 *
303 * Returns:
304 * The mapped memory's address
305 */
306void *drm_client_buffer_vmap(struct drm_client_buffer *buffer)
307{
308 void *vaddr;
309
310 if (buffer->vaddr)
311 return buffer->vaddr;
312
284 /* 313 /*
285 * FIXME: The dependency on GEM here isn't required, we could 314 * FIXME: The dependency on GEM here isn't required, we could
286 * convert the driver handle to a dma-buf instead and use the 315 * convert the driver handle to a dma-buf instead and use the
@@ -289,21 +318,30 @@ drm_client_buffer_create(struct drm_client_dev *client, u32 width, u32 height, u
289 * fd_install step out of the driver backend hooks, to make that 318 * fd_install step out of the driver backend hooks, to make that
290 * final step optional for internal users. 319 * final step optional for internal users.
291 */ 320 */
292 vaddr = drm_gem_vmap(obj); 321 vaddr = drm_gem_vmap(buffer->gem);
293 if (IS_ERR(vaddr)) { 322 if (IS_ERR(vaddr))
294 ret = PTR_ERR(vaddr); 323 return vaddr;
295 goto err_delete;
296 }
297 324
298 buffer->vaddr = vaddr; 325 buffer->vaddr = vaddr;
299 326
300 return buffer; 327 return vaddr;
301 328}
302err_delete: 329EXPORT_SYMBOL(drm_client_buffer_vmap);
303 drm_client_buffer_delete(buffer);
304 330
305 return ERR_PTR(ret); 331/**
332 * drm_client_buffer_vunmap - Unmap DRM client buffer
333 * @buffer: DRM client buffer
334 *
335 * This function removes a client buffer's memory mapping. Calling this
336 * function is only required by clients that manage their buffer mappings
337 * by themselves.
338 */
339void drm_client_buffer_vunmap(struct drm_client_buffer *buffer)
340{
341 drm_gem_vunmap(buffer->gem, buffer->vaddr);
342 buffer->vaddr = NULL;
306} 343}
344EXPORT_SYMBOL(drm_client_buffer_vunmap);
307 345
308static void drm_client_buffer_rmfb(struct drm_client_buffer *buffer) 346static void drm_client_buffer_rmfb(struct drm_client_buffer *buffer)
309{ 347{
diff --git a/drivers/gpu/drm/drm_client_modeset.c b/drivers/gpu/drm/drm_client_modeset.c
index 56d36779d213..c8922b7cac09 100644
--- a/drivers/gpu/drm/drm_client_modeset.c
+++ b/drivers/gpu/drm/drm_client_modeset.c
@@ -859,7 +859,7 @@ bool drm_client_rotation(struct drm_mode_set *modeset, unsigned int *rotation)
859 * simple XOR between the two handle the addition nicely. 859 * simple XOR between the two handle the addition nicely.
860 */ 860 */
861 cmdline = &connector->cmdline_mode; 861 cmdline = &connector->cmdline_mode;
862 if (cmdline->specified) { 862 if (cmdline->specified && cmdline->rotation_reflection) {
863 unsigned int cmdline_rest, panel_rest; 863 unsigned int cmdline_rest, panel_rest;
864 unsigned int cmdline_rot, panel_rot; 864 unsigned int cmdline_rot, panel_rot;
865 unsigned int sum_rot, sum_rest; 865 unsigned int sum_rot, sum_rest;
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
index 1984e5c54d58..a7ba5b4902d6 100644
--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -403,6 +403,7 @@ static void drm_fb_helper_dirty_work(struct work_struct *work)
403 struct drm_clip_rect *clip = &helper->dirty_clip; 403 struct drm_clip_rect *clip = &helper->dirty_clip;
404 struct drm_clip_rect clip_copy; 404 struct drm_clip_rect clip_copy;
405 unsigned long flags; 405 unsigned long flags;
406 void *vaddr;
406 407
407 spin_lock_irqsave(&helper->dirty_lock, flags); 408 spin_lock_irqsave(&helper->dirty_lock, flags);
408 clip_copy = *clip; 409 clip_copy = *clip;
@@ -412,10 +413,20 @@ static void drm_fb_helper_dirty_work(struct work_struct *work)
412 413
413 /* call dirty callback only when it has been really touched */ 414 /* call dirty callback only when it has been really touched */
414 if (clip_copy.x1 < clip_copy.x2 && clip_copy.y1 < clip_copy.y2) { 415 if (clip_copy.x1 < clip_copy.x2 && clip_copy.y1 < clip_copy.y2) {
416
415 /* Generic fbdev uses a shadow buffer */ 417 /* Generic fbdev uses a shadow buffer */
416 if (helper->buffer) 418 if (helper->buffer) {
419 vaddr = drm_client_buffer_vmap(helper->buffer);
420 if (IS_ERR(vaddr))
421 return;
417 drm_fb_helper_dirty_blit_real(helper, &clip_copy); 422 drm_fb_helper_dirty_blit_real(helper, &clip_copy);
418 helper->fb->funcs->dirty(helper->fb, NULL, 0, 0, &clip_copy, 1); 423 }
424 if (helper->fb->funcs->dirty)
425 helper->fb->funcs->dirty(helper->fb, NULL, 0, 0,
426 &clip_copy, 1);
427
428 if (helper->buffer)
429 drm_client_buffer_vunmap(helper->buffer);
419 } 430 }
420} 431}
421 432
@@ -604,6 +615,16 @@ void drm_fb_helper_unlink_fbi(struct drm_fb_helper *fb_helper)
604} 615}
605EXPORT_SYMBOL(drm_fb_helper_unlink_fbi); 616EXPORT_SYMBOL(drm_fb_helper_unlink_fbi);
606 617
618static bool drm_fbdev_use_shadow_fb(struct drm_fb_helper *fb_helper)
619{
620 struct drm_device *dev = fb_helper->dev;
621 struct drm_framebuffer *fb = fb_helper->fb;
622
623 return dev->mode_config.prefer_shadow_fbdev ||
624 dev->mode_config.prefer_shadow ||
625 fb->funcs->dirty;
626}
627
607static void drm_fb_helper_dirty(struct fb_info *info, u32 x, u32 y, 628static void drm_fb_helper_dirty(struct fb_info *info, u32 x, u32 y,
608 u32 width, u32 height) 629 u32 width, u32 height)
609{ 630{
@@ -611,7 +632,7 @@ static void drm_fb_helper_dirty(struct fb_info *info, u32 x, u32 y,
611 struct drm_clip_rect *clip = &helper->dirty_clip; 632 struct drm_clip_rect *clip = &helper->dirty_clip;
612 unsigned long flags; 633 unsigned long flags;
613 634
614 if (!helper->fb->funcs->dirty) 635 if (!drm_fbdev_use_shadow_fb(helper))
615 return; 636 return;
616 637
617 spin_lock_irqsave(&helper->dirty_lock, flags); 638 spin_lock_irqsave(&helper->dirty_lock, flags);
@@ -2178,6 +2199,7 @@ int drm_fb_helper_generic_probe(struct drm_fb_helper *fb_helper,
2178 struct drm_framebuffer *fb; 2199 struct drm_framebuffer *fb;
2179 struct fb_info *fbi; 2200 struct fb_info *fbi;
2180 u32 format; 2201 u32 format;
2202 void *vaddr;
2181 2203
2182 DRM_DEBUG_KMS("surface width(%d), height(%d) and bpp(%d)\n", 2204 DRM_DEBUG_KMS("surface width(%d), height(%d) and bpp(%d)\n",
2183 sizes->surface_width, sizes->surface_height, 2205 sizes->surface_width, sizes->surface_height,
@@ -2200,16 +2222,10 @@ int drm_fb_helper_generic_probe(struct drm_fb_helper *fb_helper,
2200 fbi->fbops = &drm_fbdev_fb_ops; 2222 fbi->fbops = &drm_fbdev_fb_ops;
2201 fbi->screen_size = fb->height * fb->pitches[0]; 2223 fbi->screen_size = fb->height * fb->pitches[0];
2202 fbi->fix.smem_len = fbi->screen_size; 2224 fbi->fix.smem_len = fbi->screen_size;
2203 fbi->screen_buffer = buffer->vaddr; 2225
2204 /* Shamelessly leak the physical address to user-space */
2205#if IS_ENABLED(CONFIG_DRM_FBDEV_LEAK_PHYS_SMEM)
2206 if (drm_leak_fbdev_smem && fbi->fix.smem_start == 0)
2207 fbi->fix.smem_start =
2208 page_to_phys(virt_to_page(fbi->screen_buffer));
2209#endif
2210 drm_fb_helper_fill_info(fbi, fb_helper, sizes); 2226 drm_fb_helper_fill_info(fbi, fb_helper, sizes);
2211 2227
2212 if (fb->funcs->dirty) { 2228 if (drm_fbdev_use_shadow_fb(fb_helper)) {
2213 struct fb_ops *fbops; 2229 struct fb_ops *fbops;
2214 void *shadow; 2230 void *shadow;
2215 2231
@@ -2231,6 +2247,19 @@ int drm_fb_helper_generic_probe(struct drm_fb_helper *fb_helper,
2231 fbi->fbdefio = &drm_fbdev_defio; 2247 fbi->fbdefio = &drm_fbdev_defio;
2232 2248
2233 fb_deferred_io_init(fbi); 2249 fb_deferred_io_init(fbi);
2250 } else {
2251 /* buffer is mapped for HW framebuffer */
2252 vaddr = drm_client_buffer_vmap(fb_helper->buffer);
2253 if (IS_ERR(vaddr))
2254 return PTR_ERR(vaddr);
2255
2256 fbi->screen_buffer = vaddr;
2257 /* Shamelessly leak the physical address to user-space */
2258#if IS_ENABLED(CONFIG_DRM_FBDEV_LEAK_PHYS_SMEM)
2259 if (drm_leak_fbdev_smem && fbi->fix.smem_start == 0)
2260 fbi->fix.smem_start =
2261 page_to_phys(virt_to_page(fbi->screen_buffer));
2262#endif
2234 } 2263 }
2235 2264
2236 return 0; 2265 return 0;
diff --git a/drivers/gpu/drm/drm_framebuffer.c b/drivers/gpu/drm/drm_framebuffer.c
index 0b72468e8131..57564318ceea 100644
--- a/drivers/gpu/drm/drm_framebuffer.c
+++ b/drivers/gpu/drm/drm_framebuffer.c
@@ -835,7 +835,7 @@ static int atomic_remove_fb(struct drm_framebuffer *fb)
835 struct drm_device *dev = fb->dev; 835 struct drm_device *dev = fb->dev;
836 struct drm_atomic_state *state; 836 struct drm_atomic_state *state;
837 struct drm_plane *plane; 837 struct drm_plane *plane;
838 struct drm_connector *conn; 838 struct drm_connector *conn __maybe_unused;
839 struct drm_connector_state *conn_state; 839 struct drm_connector_state *conn_state;
840 int i, ret; 840 int i, ret;
841 unsigned plane_mask; 841 unsigned plane_mask;
diff --git a/drivers/gpu/drm/drm_modes.c b/drivers/gpu/drm/drm_modes.c
index 74a5739df506..b0369e690f36 100644
--- a/drivers/gpu/drm/drm_modes.c
+++ b/drivers/gpu/drm/drm_modes.c
@@ -1686,7 +1686,7 @@ static int drm_mode_parse_cmdline_options(char *str, size_t len,
1686 * 1686 *
1687 * Additionals options can be provided following the mode, using a comma to 1687 * Additionals options can be provided following the mode, using a comma to
1688 * separate each option. Valid options can be found in 1688 * separate each option. Valid options can be found in
1689 * Documentation/fb/modedb.txt. 1689 * Documentation/fb/modedb.rst.
1690 * 1690 *
1691 * The intermediate drm_cmdline_mode structure is required to store additional 1691 * The intermediate drm_cmdline_mode structure is required to store additional
1692 * options from the command line modline like the force-enable/disable flag. 1692 * options from the command line modline like the force-enable/disable flag.
@@ -1770,7 +1770,9 @@ bool drm_mode_parse_command_line_for_connector(const char *mode_option,
1770 } 1770 }
1771 1771
1772 if (named_mode) { 1772 if (named_mode) {
1773 strncpy(mode->name, name, mode_end); 1773 if (mode_end + 1 > DRM_DISPLAY_MODE_LEN)
1774 return false;
1775 strscpy(mode->name, name, mode_end + 1);
1774 } else { 1776 } else {
1775 ret = drm_mode_parse_cmdline_res_mode(name, mode_end, 1777 ret = drm_mode_parse_cmdline_res_mode(name, mode_end,
1776 parse_extras, 1778 parse_extras,
diff --git a/drivers/gpu/drm/exynos/Kconfig b/drivers/gpu/drm/exynos/Kconfig
index 60ce4a8ad9e1..6f7d3b3b3628 100644
--- a/drivers/gpu/drm/exynos/Kconfig
+++ b/drivers/gpu/drm/exynos/Kconfig
@@ -2,6 +2,7 @@
2config DRM_EXYNOS 2config DRM_EXYNOS
3 tristate "DRM Support for Samsung SoC EXYNOS Series" 3 tristate "DRM Support for Samsung SoC EXYNOS Series"
4 depends on OF && DRM && (ARCH_S3C64XX || ARCH_S5PV210 || ARCH_EXYNOS || ARCH_MULTIPLATFORM || COMPILE_TEST) 4 depends on OF && DRM && (ARCH_S3C64XX || ARCH_S5PV210 || ARCH_EXYNOS || ARCH_MULTIPLATFORM || COMPILE_TEST)
5 depends on MMU
5 select DRM_KMS_HELPER 6 select DRM_KMS_HELPER
6 select VIDEOMODE_HELPERS 7 select VIDEOMODE_HELPERS
7 select SND_SOC_HDMI_CODEC if SND_SOC 8 select SND_SOC_HDMI_CODEC if SND_SOC
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimc.c b/drivers/gpu/drm/exynos/exynos_drm_fimc.c
index a594ab7be2c0..164d914cbe9a 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fimc.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fimc.c
@@ -44,7 +44,7 @@ static unsigned int fimc_mask = 0xc;
44module_param_named(fimc_devs, fimc_mask, uint, 0644); 44module_param_named(fimc_devs, fimc_mask, uint, 0644);
45MODULE_PARM_DESC(fimc_devs, "Alias mask for assigning FIMC devices to Exynos DRM"); 45MODULE_PARM_DESC(fimc_devs, "Alias mask for assigning FIMC devices to Exynos DRM");
46 46
47#define get_fimc_context(dev) platform_get_drvdata(to_platform_device(dev)) 47#define get_fimc_context(dev) dev_get_drvdata(dev)
48 48
49enum { 49enum {
50 FIMC_CLK_LCLK, 50 FIMC_CLK_LCLK,
diff --git a/drivers/gpu/drm/exynos/exynos_drm_g2d.c b/drivers/gpu/drm/exynos/exynos_drm_g2d.c
index 50904eee96f7..2a3382d43bc9 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_g2d.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_g2d.c
@@ -267,7 +267,7 @@ static inline void g2d_hw_reset(struct g2d_data *g2d)
267static int g2d_init_cmdlist(struct g2d_data *g2d) 267static int g2d_init_cmdlist(struct g2d_data *g2d)
268{ 268{
269 struct device *dev = g2d->dev; 269 struct device *dev = g2d->dev;
270 struct g2d_cmdlist_node *node = g2d->cmdlist_node; 270 struct g2d_cmdlist_node *node;
271 int nr; 271 int nr;
272 int ret; 272 int ret;
273 struct g2d_buf_info *buf_info; 273 struct g2d_buf_info *buf_info;
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gsc.c b/drivers/gpu/drm/exynos/exynos_drm_gsc.c
index 1e4b21c49a06..1c524db9570f 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_gsc.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_gsc.c
@@ -58,7 +58,7 @@
58#define GSC_COEF_DEPTH 3 58#define GSC_COEF_DEPTH 3
59#define GSC_AUTOSUSPEND_DELAY 2000 59#define GSC_AUTOSUSPEND_DELAY 2000
60 60
61#define get_gsc_context(dev) platform_get_drvdata(to_platform_device(dev)) 61#define get_gsc_context(dev) dev_get_drvdata(dev)
62#define gsc_read(offset) readl(ctx->regs + (offset)) 62#define gsc_read(offset) readl(ctx->regs + (offset))
63#define gsc_write(cfg, offset) writel(cfg, ctx->regs + (offset)) 63#define gsc_write(cfg, offset) writel(cfg, ctx->regs + (offset))
64 64
diff --git a/drivers/gpu/drm/exynos/exynos_drm_scaler.c b/drivers/gpu/drm/exynos/exynos_drm_scaler.c
index 9af096479e1c..b24ba948b725 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_scaler.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_scaler.c
@@ -94,12 +94,12 @@ static inline int scaler_reset(struct scaler_context *scaler)
94 scaler_write(SCALER_CFG_SOFT_RESET, SCALER_CFG); 94 scaler_write(SCALER_CFG_SOFT_RESET, SCALER_CFG);
95 do { 95 do {
96 cpu_relax(); 96 cpu_relax();
97 } while (retry > 1 && 97 } while (--retry > 1 &&
98 scaler_read(SCALER_CFG) & SCALER_CFG_SOFT_RESET); 98 scaler_read(SCALER_CFG) & SCALER_CFG_SOFT_RESET);
99 do { 99 do {
100 cpu_relax(); 100 cpu_relax();
101 scaler_write(1, SCALER_INT_EN); 101 scaler_write(1, SCALER_INT_EN);
102 } while (retry > 0 && scaler_read(SCALER_INT_EN) != 1); 102 } while (--retry > 0 && scaler_read(SCALER_INT_EN) != 1);
103 103
104 return retry ? 0 : -EIO; 104 return retry ? 0 : -EIO;
105} 105}
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index 91355c2ea8a5..8cace65f50ce 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -16,7 +16,6 @@ subdir-ccflags-y := -Wall -Wextra
16subdir-ccflags-y += $(call cc-disable-warning, unused-parameter) 16subdir-ccflags-y += $(call cc-disable-warning, unused-parameter)
17subdir-ccflags-y += $(call cc-disable-warning, type-limits) 17subdir-ccflags-y += $(call cc-disable-warning, type-limits)
18subdir-ccflags-y += $(call cc-disable-warning, missing-field-initializers) 18subdir-ccflags-y += $(call cc-disable-warning, missing-field-initializers)
19subdir-ccflags-y += $(call cc-disable-warning, implicit-fallthrough)
20subdir-ccflags-y += $(call cc-disable-warning, unused-but-set-variable) 19subdir-ccflags-y += $(call cc-disable-warning, unused-but-set-variable)
21# clang warnings 20# clang warnings
22subdir-ccflags-y += $(call cc-disable-warning, sign-compare) 21subdir-ccflags-y += $(call cc-disable-warning, sign-compare)
diff --git a/drivers/gpu/drm/i915/display/intel_bios.c b/drivers/gpu/drm/i915/display/intel_bios.c
index c4710889cb32..3ef4e9f573cf 100644
--- a/drivers/gpu/drm/i915/display/intel_bios.c
+++ b/drivers/gpu/drm/i915/display/intel_bios.c
@@ -765,7 +765,7 @@ parse_psr(struct drm_i915_private *dev_priv, const struct bdb_header *bdb)
765 } 765 }
766 766
767 if (bdb->version >= 226) { 767 if (bdb->version >= 226) {
768 u32 wakeup_time = psr_table->psr2_tp2_tp3_wakeup_time; 768 u32 wakeup_time = psr->psr2_tp2_tp3_wakeup_time;
769 769
770 wakeup_time = (wakeup_time >> (2 * panel_type)) & 0x3; 770 wakeup_time = (wakeup_time >> (2 * panel_type)) & 0x3;
771 switch (wakeup_time) { 771 switch (wakeup_time) {
diff --git a/drivers/gpu/drm/i915/display/intel_bw.c b/drivers/gpu/drm/i915/display/intel_bw.c
index 753ac3165061..7b908e10d32e 100644
--- a/drivers/gpu/drm/i915/display/intel_bw.c
+++ b/drivers/gpu/drm/i915/display/intel_bw.c
@@ -178,6 +178,8 @@ static int icl_get_bw_info(struct drm_i915_private *dev_priv)
178 clpchgroup = (sa->deburst * deinterleave / num_channels) << i; 178 clpchgroup = (sa->deburst * deinterleave / num_channels) << i;
179 bi->num_planes = (ipqdepth - clpchgroup) / clpchgroup + 1; 179 bi->num_planes = (ipqdepth - clpchgroup) / clpchgroup + 1;
180 180
181 bi->num_qgv_points = qi.num_points;
182
181 for (j = 0; j < qi.num_points; j++) { 183 for (j = 0; j < qi.num_points; j++) {
182 const struct intel_qgv_point *sp = &qi.points[j]; 184 const struct intel_qgv_point *sp = &qi.points[j];
183 int ct, bw; 185 int ct, bw;
@@ -195,7 +197,7 @@ static int icl_get_bw_info(struct drm_i915_private *dev_priv)
195 bi->deratedbw[j] = min(maxdebw, 197 bi->deratedbw[j] = min(maxdebw,
196 bw * 9 / 10); /* 90% */ 198 bw * 9 / 10); /* 90% */
197 199
198 DRM_DEBUG_KMS("BW%d / QGV %d: num_planes=%d deratedbw=%d\n", 200 DRM_DEBUG_KMS("BW%d / QGV %d: num_planes=%d deratedbw=%u\n",
199 i, j, bi->num_planes, bi->deratedbw[j]); 201 i, j, bi->num_planes, bi->deratedbw[j]);
200 } 202 }
201 203
@@ -211,14 +213,17 @@ static unsigned int icl_max_bw(struct drm_i915_private *dev_priv,
211{ 213{
212 int i; 214 int i;
213 215
214 /* Did we initialize the bw limits successfully? */
215 if (dev_priv->max_bw[0].num_planes == 0)
216 return UINT_MAX;
217
218 for (i = 0; i < ARRAY_SIZE(dev_priv->max_bw); i++) { 216 for (i = 0; i < ARRAY_SIZE(dev_priv->max_bw); i++) {
219 const struct intel_bw_info *bi = 217 const struct intel_bw_info *bi =
220 &dev_priv->max_bw[i]; 218 &dev_priv->max_bw[i];
221 219
220 /*
221 * Pcode will not expose all QGV points when
222 * SAGV is forced to off/min/med/max.
223 */
224 if (qgv_point >= bi->num_qgv_points)
225 return UINT_MAX;
226
222 if (num_planes >= bi->num_planes) 227 if (num_planes >= bi->num_planes)
223 return bi->deratedbw[qgv_point]; 228 return bi->deratedbw[qgv_point];
224 } 229 }
diff --git a/drivers/gpu/drm/i915/display/intel_cdclk.c b/drivers/gpu/drm/i915/display/intel_cdclk.c
index 8993ab283562..0d19bbd08122 100644
--- a/drivers/gpu/drm/i915/display/intel_cdclk.c
+++ b/drivers/gpu/drm/i915/display/intel_cdclk.c
@@ -2240,6 +2240,17 @@ int intel_crtc_compute_min_cdclk(const struct intel_crtc_state *crtc_state)
2240 min_cdclk = max(2 * 96000, min_cdclk); 2240 min_cdclk = max(2 * 96000, min_cdclk);
2241 2241
2242 /* 2242 /*
2243 * "For DP audio configuration, cdclk frequency shall be set to
2244 * meet the following requirements:
2245 * DP Link Frequency(MHz) | Cdclk frequency(MHz)
2246 * 270 | 320 or higher
2247 * 162 | 200 or higher"
2248 */
2249 if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
2250 intel_crtc_has_dp_encoder(crtc_state) && crtc_state->has_audio)
2251 min_cdclk = max(crtc_state->port_clock, min_cdclk);
2252
2253 /*
2243 * On Valleyview some DSI panels lose (v|h)sync when the clock is lower 2254 * On Valleyview some DSI panels lose (v|h)sync when the clock is lower
2244 * than 320000KHz. 2255 * than 320000KHz.
2245 */ 2256 */
diff --git a/drivers/gpu/drm/i915/display/intel_ddi.c b/drivers/gpu/drm/i915/display/intel_ddi.c
index 7925a176f900..1cb1fa74cfbc 100644
--- a/drivers/gpu/drm/i915/display/intel_ddi.c
+++ b/drivers/gpu/drm/i915/display/intel_ddi.c
@@ -1465,8 +1465,8 @@ static void ddi_dotclock_get(struct intel_crtc_state *pipe_config)
1465 else if (intel_crtc_has_dp_encoder(pipe_config)) 1465 else if (intel_crtc_has_dp_encoder(pipe_config))
1466 dotclock = intel_dotclock_calculate(pipe_config->port_clock, 1466 dotclock = intel_dotclock_calculate(pipe_config->port_clock,
1467 &pipe_config->dp_m_n); 1467 &pipe_config->dp_m_n);
1468 else if (pipe_config->has_hdmi_sink && pipe_config->pipe_bpp == 36) 1468 else if (pipe_config->has_hdmi_sink && pipe_config->pipe_bpp > 24)
1469 dotclock = pipe_config->port_clock * 2 / 3; 1469 dotclock = pipe_config->port_clock * 24 / pipe_config->pipe_bpp;
1470 else 1470 else
1471 dotclock = pipe_config->port_clock; 1471 dotclock = pipe_config->port_clock;
1472 1472
diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c
index 8592a7d422de..592b92782fab 100644
--- a/drivers/gpu/drm/i915/display/intel_display.c
+++ b/drivers/gpu/drm/i915/display/intel_display.c
@@ -1839,7 +1839,7 @@ static void intel_enable_pipe(const struct intel_crtc_state *new_crtc_state)
1839 /* FIXME: assert CPU port conditions for SNB+ */ 1839 /* FIXME: assert CPU port conditions for SNB+ */
1840 } 1840 }
1841 1841
1842 trace_intel_pipe_enable(dev_priv, pipe); 1842 trace_intel_pipe_enable(crtc);
1843 1843
1844 reg = PIPECONF(cpu_transcoder); 1844 reg = PIPECONF(cpu_transcoder);
1845 val = I915_READ(reg); 1845 val = I915_READ(reg);
@@ -1880,7 +1880,7 @@ static void intel_disable_pipe(const struct intel_crtc_state *old_crtc_state)
1880 */ 1880 */
1881 assert_planes_disabled(crtc); 1881 assert_planes_disabled(crtc);
1882 1882
1883 trace_intel_pipe_disable(dev_priv, pipe); 1883 trace_intel_pipe_disable(crtc);
1884 1884
1885 reg = PIPECONF(cpu_transcoder); 1885 reg = PIPECONF(cpu_transcoder);
1886 val = I915_READ(reg); 1886 val = I915_READ(reg);
@@ -12042,7 +12042,7 @@ static bool check_digital_port_conflicts(struct intel_atomic_state *state)
12042 case INTEL_OUTPUT_DDI: 12042 case INTEL_OUTPUT_DDI:
12043 if (WARN_ON(!HAS_DDI(to_i915(dev)))) 12043 if (WARN_ON(!HAS_DDI(to_i915(dev))))
12044 break; 12044 break;
12045 /* else: fall through */ 12045 /* else, fall through */
12046 case INTEL_OUTPUT_DP: 12046 case INTEL_OUTPUT_DP:
12047 case INTEL_OUTPUT_HDMI: 12047 case INTEL_OUTPUT_HDMI:
12048 case INTEL_OUTPUT_EDP: 12048 case INTEL_OUTPUT_EDP:
diff --git a/drivers/gpu/drm/i915/display/intel_display_power.c b/drivers/gpu/drm/i915/display/intel_display_power.c
index c93ad512014c..2d1939db108f 100644
--- a/drivers/gpu/drm/i915/display/intel_display_power.c
+++ b/drivers/gpu/drm/i915/display/intel_display_power.c
@@ -438,16 +438,23 @@ icl_combo_phy_aux_power_well_disable(struct drm_i915_private *dev_priv,
438#define ICL_AUX_PW_TO_CH(pw_idx) \ 438#define ICL_AUX_PW_TO_CH(pw_idx) \
439 ((pw_idx) - ICL_PW_CTL_IDX_AUX_A + AUX_CH_A) 439 ((pw_idx) - ICL_PW_CTL_IDX_AUX_A + AUX_CH_A)
440 440
441#define ICL_TBT_AUX_PW_TO_CH(pw_idx) \
442 ((pw_idx) - ICL_PW_CTL_IDX_AUX_TBT1 + AUX_CH_C)
443
441static void 444static void
442icl_tc_phy_aux_power_well_enable(struct drm_i915_private *dev_priv, 445icl_tc_phy_aux_power_well_enable(struct drm_i915_private *dev_priv,
443 struct i915_power_well *power_well) 446 struct i915_power_well *power_well)
444{ 447{
445 enum aux_ch aux_ch = ICL_AUX_PW_TO_CH(power_well->desc->hsw.idx); 448 int pw_idx = power_well->desc->hsw.idx;
449 bool is_tbt = power_well->desc->hsw.is_tc_tbt;
450 enum aux_ch aux_ch;
446 u32 val; 451 u32 val;
447 452
453 aux_ch = is_tbt ? ICL_TBT_AUX_PW_TO_CH(pw_idx) :
454 ICL_AUX_PW_TO_CH(pw_idx);
448 val = I915_READ(DP_AUX_CH_CTL(aux_ch)); 455 val = I915_READ(DP_AUX_CH_CTL(aux_ch));
449 val &= ~DP_AUX_CH_CTL_TBT_IO; 456 val &= ~DP_AUX_CH_CTL_TBT_IO;
450 if (power_well->desc->hsw.is_tc_tbt) 457 if (is_tbt)
451 val |= DP_AUX_CH_CTL_TBT_IO; 458 val |= DP_AUX_CH_CTL_TBT_IO;
452 I915_WRITE(DP_AUX_CH_CTL(aux_ch), val); 459 I915_WRITE(DP_AUX_CH_CTL(aux_ch), val);
453 460
diff --git a/drivers/gpu/drm/i915/display/intel_dp.c b/drivers/gpu/drm/i915/display/intel_dp.c
index 4336df46fe78..d0fc34826771 100644
--- a/drivers/gpu/drm/i915/display/intel_dp.c
+++ b/drivers/gpu/drm/i915/display/intel_dp.c
@@ -231,6 +231,7 @@ static int intel_dp_get_fia_supported_lane_count(struct intel_dp *intel_dp)
231 switch (lane_info) { 231 switch (lane_info) {
232 default: 232 default:
233 MISSING_CASE(lane_info); 233 MISSING_CASE(lane_info);
234 /* fall through */
234 case 1: 235 case 1:
235 case 2: 236 case 2:
236 case 4: 237 case 4:
diff --git a/drivers/gpu/drm/i915/display/intel_dp_mst.c b/drivers/gpu/drm/i915/display/intel_dp_mst.c
index 60652ebbdf61..18e4cba76720 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_mst.c
+++ b/drivers/gpu/drm/i915/display/intel_dp_mst.c
@@ -539,7 +539,15 @@ static struct drm_connector *intel_dp_add_mst_connector(struct drm_dp_mst_topolo
539 539
540 intel_attach_force_audio_property(connector); 540 intel_attach_force_audio_property(connector);
541 intel_attach_broadcast_rgb_property(connector); 541 intel_attach_broadcast_rgb_property(connector);
542 drm_connector_attach_max_bpc_property(connector, 6, 12); 542
543 /*
544 * Reuse the prop from the SST connector because we're
545 * not allowed to create new props after device registration.
546 */
547 connector->max_bpc_property =
548 intel_dp->attached_connector->base.max_bpc_property;
549 if (connector->max_bpc_property)
550 drm_connector_attach_max_bpc_property(connector, 6, 12);
543 551
544 return connector; 552 return connector;
545 553
diff --git a/drivers/gpu/drm/i915/display/intel_hdcp.c b/drivers/gpu/drm/i915/display/intel_hdcp.c
index bc3a94d491c4..27bd7276a82d 100644
--- a/drivers/gpu/drm/i915/display/intel_hdcp.c
+++ b/drivers/gpu/drm/i915/display/intel_hdcp.c
@@ -536,7 +536,8 @@ int intel_hdcp_auth_downstream(struct intel_connector *connector)
536 536
537 if (drm_hdcp_check_ksvs_revoked(dev, ksv_fifo, num_downstream)) { 537 if (drm_hdcp_check_ksvs_revoked(dev, ksv_fifo, num_downstream)) {
538 DRM_ERROR("Revoked Ksv(s) in ksv_fifo\n"); 538 DRM_ERROR("Revoked Ksv(s) in ksv_fifo\n");
539 return -EPERM; 539 ret = -EPERM;
540 goto err;
540 } 541 }
541 542
542 /* 543 /*
diff --git a/drivers/gpu/drm/i915/display/intel_vbt_defs.h b/drivers/gpu/drm/i915/display/intel_vbt_defs.h
index 2f4894e9a03d..5ddbe71ab423 100644
--- a/drivers/gpu/drm/i915/display/intel_vbt_defs.h
+++ b/drivers/gpu/drm/i915/display/intel_vbt_defs.h
@@ -478,13 +478,13 @@ struct psr_table {
478 /* TP wake up time in multiple of 100 */ 478 /* TP wake up time in multiple of 100 */
479 u16 tp1_wakeup_time; 479 u16 tp1_wakeup_time;
480 u16 tp2_tp3_wakeup_time; 480 u16 tp2_tp3_wakeup_time;
481
482 /* PSR2 TP2/TP3 wakeup time for 16 panels */
483 u32 psr2_tp2_tp3_wakeup_time;
484} __packed; 481} __packed;
485 482
486struct bdb_psr { 483struct bdb_psr {
487 struct psr_table psr_table[16]; 484 struct psr_table psr_table[16];
485
486 /* PSR2 TP2/TP3 wakeup time for 16 panels */
487 u32 psr2_tp2_tp3_wakeup_time;
488} __packed; 488} __packed;
489 489
490/* 490/*
diff --git a/drivers/gpu/drm/i915/display/intel_vdsc.c b/drivers/gpu/drm/i915/display/intel_vdsc.c
index ffec807b8960..f413904a3e96 100644
--- a/drivers/gpu/drm/i915/display/intel_vdsc.c
+++ b/drivers/gpu/drm/i915/display/intel_vdsc.c
@@ -541,7 +541,7 @@ static void intel_configure_pps_for_dsc_encoder(struct intel_encoder *encoder,
541 pps_val |= DSC_PIC_HEIGHT(vdsc_cfg->pic_height) | 541 pps_val |= DSC_PIC_HEIGHT(vdsc_cfg->pic_height) |
542 DSC_PIC_WIDTH(vdsc_cfg->pic_width / num_vdsc_instances); 542 DSC_PIC_WIDTH(vdsc_cfg->pic_width / num_vdsc_instances);
543 DRM_INFO("PPS2 = 0x%08x\n", pps_val); 543 DRM_INFO("PPS2 = 0x%08x\n", pps_val);
544 if (encoder->type == INTEL_OUTPUT_EDP) { 544 if (cpu_transcoder == TRANSCODER_EDP) {
545 I915_WRITE(DSCA_PICTURE_PARAMETER_SET_2, pps_val); 545 I915_WRITE(DSCA_PICTURE_PARAMETER_SET_2, pps_val);
546 /* 546 /*
547 * If 2 VDSC instances are needed, configure PPS for second 547 * If 2 VDSC instances are needed, configure PPS for second
diff --git a/drivers/gpu/drm/i915/display/vlv_dsi_pll.c b/drivers/gpu/drm/i915/display/vlv_dsi_pll.c
index 99cc3e2e9c2c..f016a776a39e 100644
--- a/drivers/gpu/drm/i915/display/vlv_dsi_pll.c
+++ b/drivers/gpu/drm/i915/display/vlv_dsi_pll.c
@@ -396,8 +396,8 @@ static void glk_dsi_program_esc_clock(struct drm_device *dev,
396 else 396 else
397 txesc2_div = 10; 397 txesc2_div = 10;
398 398
399 I915_WRITE(MIPIO_TXESC_CLK_DIV1, txesc1_div & GLK_TX_ESC_CLK_DIV1_MASK); 399 I915_WRITE(MIPIO_TXESC_CLK_DIV1, (1 << (txesc1_div - 1)) & GLK_TX_ESC_CLK_DIV1_MASK);
400 I915_WRITE(MIPIO_TXESC_CLK_DIV2, txesc2_div & GLK_TX_ESC_CLK_DIV2_MASK); 400 I915_WRITE(MIPIO_TXESC_CLK_DIV2, (1 << (txesc2_div - 1)) & GLK_TX_ESC_CLK_DIV2_MASK);
401} 401}
402 402
403/* Program BXT Mipi clocks and dividers */ 403/* Program BXT Mipi clocks and dividers */
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
index 5fae0e50aad0..41dab9ea33cd 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
@@ -1628,6 +1628,7 @@ static int check_relocations(const struct drm_i915_gem_exec_object2 *entry)
1628 1628
1629static int eb_copy_relocations(const struct i915_execbuffer *eb) 1629static int eb_copy_relocations(const struct i915_execbuffer *eb)
1630{ 1630{
1631 struct drm_i915_gem_relocation_entry *relocs;
1631 const unsigned int count = eb->buffer_count; 1632 const unsigned int count = eb->buffer_count;
1632 unsigned int i; 1633 unsigned int i;
1633 int err; 1634 int err;
@@ -1635,7 +1636,6 @@ static int eb_copy_relocations(const struct i915_execbuffer *eb)
1635 for (i = 0; i < count; i++) { 1636 for (i = 0; i < count; i++) {
1636 const unsigned int nreloc = eb->exec[i].relocation_count; 1637 const unsigned int nreloc = eb->exec[i].relocation_count;
1637 struct drm_i915_gem_relocation_entry __user *urelocs; 1638 struct drm_i915_gem_relocation_entry __user *urelocs;
1638 struct drm_i915_gem_relocation_entry *relocs;
1639 unsigned long size; 1639 unsigned long size;
1640 unsigned long copied; 1640 unsigned long copied;
1641 1641
@@ -1663,14 +1663,8 @@ static int eb_copy_relocations(const struct i915_execbuffer *eb)
1663 1663
1664 if (__copy_from_user((char *)relocs + copied, 1664 if (__copy_from_user((char *)relocs + copied,
1665 (char __user *)urelocs + copied, 1665 (char __user *)urelocs + copied,
1666 len)) { 1666 len))
1667end_user: 1667 goto end;
1668 user_access_end();
1669end:
1670 kvfree(relocs);
1671 err = -EFAULT;
1672 goto err;
1673 }
1674 1668
1675 copied += len; 1669 copied += len;
1676 } while (copied < size); 1670 } while (copied < size);
@@ -1699,10 +1693,14 @@ end:
1699 1693
1700 return 0; 1694 return 0;
1701 1695
1696end_user:
1697 user_access_end();
1698end:
1699 kvfree(relocs);
1700 err = -EFAULT;
1702err: 1701err:
1703 while (i--) { 1702 while (i--) {
1704 struct drm_i915_gem_relocation_entry *relocs = 1703 relocs = u64_to_ptr(typeof(*relocs), eb->exec[i].relocs_ptr);
1705 u64_to_ptr(typeof(*relocs), eb->exec[i].relocs_ptr);
1706 if (eb->exec[i].relocation_count) 1704 if (eb->exec[i].relocation_count)
1707 kvfree(relocs); 1705 kvfree(relocs);
1708 } 1706 }
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_mman.c b/drivers/gpu/drm/i915/gem/i915_gem_mman.c
index 391621ee3cbb..39a661927d8e 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_mman.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_mman.c
@@ -341,7 +341,7 @@ err:
341 */ 341 */
342 if (!i915_terminally_wedged(i915)) 342 if (!i915_terminally_wedged(i915))
343 return VM_FAULT_SIGBUS; 343 return VM_FAULT_SIGBUS;
344 /* else: fall through */ 344 /* else, fall through */
345 case -EAGAIN: 345 case -EAGAIN:
346 /* 346 /*
347 * EAGAIN means the gpu is hung and we'll wait for the error 347 * EAGAIN means the gpu is hung and we'll wait for the error
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_pages.c b/drivers/gpu/drm/i915/gem/i915_gem_pages.c
index b36ad269f4ea..65eb430cedba 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_pages.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_pages.c
@@ -268,7 +268,7 @@ static void *i915_gem_object_map(const struct drm_i915_gem_object *obj,
268 switch (type) { 268 switch (type) {
269 default: 269 default:
270 MISSING_CASE(type); 270 MISSING_CASE(type);
271 /* fallthrough to use PAGE_KERNEL anyway */ 271 /* fallthrough - to use PAGE_KERNEL anyway */
272 case I915_MAP_WB: 272 case I915_MAP_WB:
273 pgprot = PAGE_KERNEL; 273 pgprot = PAGE_KERNEL;
274 break; 274 break;
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_pm.c b/drivers/gpu/drm/i915/gem/i915_gem_pm.c
index 05011d4a3b88..914b5d4112bb 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_pm.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_pm.c
@@ -253,14 +253,15 @@ void i915_gem_resume(struct drm_i915_private *i915)
253 i915_gem_restore_gtt_mappings(i915); 253 i915_gem_restore_gtt_mappings(i915);
254 i915_gem_restore_fences(i915); 254 i915_gem_restore_fences(i915);
255 255
256 if (i915_gem_init_hw(i915))
257 goto err_wedged;
258
256 /* 259 /*
257 * As we didn't flush the kernel context before suspend, we cannot 260 * As we didn't flush the kernel context before suspend, we cannot
258 * guarantee that the context image is complete. So let's just reset 261 * guarantee that the context image is complete. So let's just reset
259 * it and start again. 262 * it and start again.
260 */ 263 */
261 intel_gt_resume(i915); 264 if (intel_gt_resume(i915))
262
263 if (i915_gem_init_hw(i915))
264 goto err_wedged; 265 goto err_wedged;
265 266
266 intel_uc_resume(i915); 267 intel_uc_resume(i915);
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_userptr.c b/drivers/gpu/drm/i915/gem/i915_gem_userptr.c
index 528b61678334..2caa594322bc 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_userptr.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_userptr.c
@@ -664,7 +664,15 @@ i915_gem_userptr_put_pages(struct drm_i915_gem_object *obj,
664 664
665 for_each_sgt_page(page, sgt_iter, pages) { 665 for_each_sgt_page(page, sgt_iter, pages) {
666 if (obj->mm.dirty) 666 if (obj->mm.dirty)
667 set_page_dirty(page); 667 /*
668 * As this may not be anonymous memory (e.g. shmem)
669 * but exist on a real mapping, we have to lock
670 * the page in order to dirty it -- holding
671 * the page reference is not sufficient to
672 * prevent the inode from being truncated.
673 * Play safe and take the lock.
674 */
675 set_page_dirty_lock(page);
668 676
669 mark_page_accessed(page); 677 mark_page_accessed(page);
670 put_page(page); 678 put_page(page);
diff --git a/drivers/gpu/drm/i915/gt/intel_context.c b/drivers/gpu/drm/i915/gt/intel_context.c
index 2c454f227c2e..23120901c55f 100644
--- a/drivers/gpu/drm/i915/gt/intel_context.c
+++ b/drivers/gpu/drm/i915/gt/intel_context.c
@@ -126,6 +126,7 @@ static void intel_context_retire(struct i915_active *active)
126 if (ce->state) 126 if (ce->state)
127 __context_unpin_state(ce->state); 127 __context_unpin_state(ce->state);
128 128
129 intel_ring_unpin(ce->ring);
129 intel_context_put(ce); 130 intel_context_put(ce);
130} 131}
131 132
@@ -160,27 +161,35 @@ int intel_context_active_acquire(struct intel_context *ce, unsigned long flags)
160 161
161 intel_context_get(ce); 162 intel_context_get(ce);
162 163
164 err = intel_ring_pin(ce->ring);
165 if (err)
166 goto err_put;
167
163 if (!ce->state) 168 if (!ce->state)
164 return 0; 169 return 0;
165 170
166 err = __context_pin_state(ce->state, flags); 171 err = __context_pin_state(ce->state, flags);
167 if (err) { 172 if (err)
168 i915_active_cancel(&ce->active); 173 goto err_ring;
169 intel_context_put(ce);
170 return err;
171 }
172 174
173 /* Preallocate tracking nodes */ 175 /* Preallocate tracking nodes */
174 if (!i915_gem_context_is_kernel(ce->gem_context)) { 176 if (!i915_gem_context_is_kernel(ce->gem_context)) {
175 err = i915_active_acquire_preallocate_barrier(&ce->active, 177 err = i915_active_acquire_preallocate_barrier(&ce->active,
176 ce->engine); 178 ce->engine);
177 if (err) { 179 if (err)
178 i915_active_release(&ce->active); 180 goto err_state;
179 return err;
180 }
181 } 181 }
182 182
183 return 0; 183 return 0;
184
185err_state:
186 __context_unpin_state(ce->state);
187err_ring:
188 intel_ring_unpin(ce->ring);
189err_put:
190 intel_context_put(ce);
191 i915_active_cancel(&ce->active);
192 return err;
184} 193}
185 194
186void intel_context_active_release(struct intel_context *ce) 195void intel_context_active_release(struct intel_context *ce)
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_cs.c b/drivers/gpu/drm/i915/gt/intel_engine_cs.c
index 7fd33e81c2d9..f25632c9b292 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_cs.c
+++ b/drivers/gpu/drm/i915/gt/intel_engine_cs.c
@@ -969,9 +969,14 @@ const char *i915_cache_level_str(struct drm_i915_private *i915, int type)
969u32 intel_calculate_mcr_s_ss_select(struct drm_i915_private *dev_priv) 969u32 intel_calculate_mcr_s_ss_select(struct drm_i915_private *dev_priv)
970{ 970{
971 const struct sseu_dev_info *sseu = &RUNTIME_INFO(dev_priv)->sseu; 971 const struct sseu_dev_info *sseu = &RUNTIME_INFO(dev_priv)->sseu;
972 unsigned int slice = fls(sseu->slice_mask) - 1;
973 unsigned int subslice;
972 u32 mcr_s_ss_select; 974 u32 mcr_s_ss_select;
973 u32 slice = fls(sseu->slice_mask); 975
974 u32 subslice = fls(sseu->subslice_mask[slice]); 976 GEM_BUG_ON(slice >= ARRAY_SIZE(sseu->subslice_mask));
977 subslice = fls(sseu->subslice_mask[slice]);
978 GEM_BUG_ON(!subslice);
979 subslice--;
975 980
976 if (IS_GEN(dev_priv, 10)) 981 if (IS_GEN(dev_priv, 10))
977 mcr_s_ss_select = GEN8_MCR_SLICE(slice) | 982 mcr_s_ss_select = GEN8_MCR_SLICE(slice) |
@@ -1471,6 +1476,7 @@ void intel_engine_dump(struct intel_engine_cs *engine,
1471 struct i915_gpu_error * const error = &engine->i915->gpu_error; 1476 struct i915_gpu_error * const error = &engine->i915->gpu_error;
1472 struct i915_request *rq; 1477 struct i915_request *rq;
1473 intel_wakeref_t wakeref; 1478 intel_wakeref_t wakeref;
1479 unsigned long flags;
1474 1480
1475 if (header) { 1481 if (header) {
1476 va_list ap; 1482 va_list ap;
@@ -1490,10 +1496,9 @@ void intel_engine_dump(struct intel_engine_cs *engine,
1490 i915_reset_engine_count(error, engine), 1496 i915_reset_engine_count(error, engine),
1491 i915_reset_count(error)); 1497 i915_reset_count(error));
1492 1498
1493 rcu_read_lock();
1494
1495 drm_printf(m, "\tRequests:\n"); 1499 drm_printf(m, "\tRequests:\n");
1496 1500
1501 spin_lock_irqsave(&engine->active.lock, flags);
1497 rq = intel_engine_find_active_request(engine); 1502 rq = intel_engine_find_active_request(engine);
1498 if (rq) { 1503 if (rq) {
1499 print_request(m, rq, "\t\tactive "); 1504 print_request(m, rq, "\t\tactive ");
@@ -1513,8 +1518,7 @@ void intel_engine_dump(struct intel_engine_cs *engine,
1513 1518
1514 print_request_ring(m, rq); 1519 print_request_ring(m, rq);
1515 } 1520 }
1516 1521 spin_unlock_irqrestore(&engine->active.lock, flags);
1517 rcu_read_unlock();
1518 1522
1519 wakeref = intel_runtime_pm_get_if_in_use(&engine->i915->runtime_pm); 1523 wakeref = intel_runtime_pm_get_if_in_use(&engine->i915->runtime_pm);
1520 if (wakeref) { 1524 if (wakeref) {
@@ -1672,7 +1676,6 @@ struct i915_request *
1672intel_engine_find_active_request(struct intel_engine_cs *engine) 1676intel_engine_find_active_request(struct intel_engine_cs *engine)
1673{ 1677{
1674 struct i915_request *request, *active = NULL; 1678 struct i915_request *request, *active = NULL;
1675 unsigned long flags;
1676 1679
1677 /* 1680 /*
1678 * We are called by the error capture, reset and to dump engine 1681 * We are called by the error capture, reset and to dump engine
@@ -1685,7 +1688,7 @@ intel_engine_find_active_request(struct intel_engine_cs *engine)
1685 * At all other times, we must assume the GPU is still running, but 1688 * At all other times, we must assume the GPU is still running, but
1686 * we only care about the snapshot of this moment. 1689 * we only care about the snapshot of this moment.
1687 */ 1690 */
1688 spin_lock_irqsave(&engine->active.lock, flags); 1691 lockdep_assert_held(&engine->active.lock);
1689 list_for_each_entry(request, &engine->active.requests, sched.link) { 1692 list_for_each_entry(request, &engine->active.requests, sched.link) {
1690 if (i915_request_completed(request)) 1693 if (i915_request_completed(request))
1691 continue; 1694 continue;
@@ -1700,7 +1703,6 @@ intel_engine_find_active_request(struct intel_engine_cs *engine)
1700 active = request; 1703 active = request;
1701 break; 1704 break;
1702 } 1705 }
1703 spin_unlock_irqrestore(&engine->active.lock, flags);
1704 1706
1705 return active; 1707 return active;
1706} 1708}
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_pm.c b/drivers/gpu/drm/i915/gt/intel_engine_pm.c
index 2ce00d3dc42a..ae5b6baf6dff 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_pm.c
+++ b/drivers/gpu/drm/i915/gt/intel_engine_pm.c
@@ -142,27 +142,3 @@ void intel_engine_init__pm(struct intel_engine_cs *engine)
142{ 142{
143 intel_wakeref_init(&engine->wakeref); 143 intel_wakeref_init(&engine->wakeref);
144} 144}
145
146int intel_engines_resume(struct drm_i915_private *i915)
147{
148 struct intel_engine_cs *engine;
149 enum intel_engine_id id;
150 int err = 0;
151
152 intel_gt_pm_get(i915);
153 for_each_engine(engine, i915, id) {
154 intel_engine_pm_get(engine);
155 engine->serial++; /* kernel context lost */
156 err = engine->resume(engine);
157 intel_engine_pm_put(engine);
158 if (err) {
159 dev_err(i915->drm.dev,
160 "Failed to restart %s (%d)\n",
161 engine->name, err);
162 break;
163 }
164 }
165 intel_gt_pm_put(i915);
166
167 return err;
168}
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_pm.h b/drivers/gpu/drm/i915/gt/intel_engine_pm.h
index b326cd993d60..a11c893f64c6 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_pm.h
+++ b/drivers/gpu/drm/i915/gt/intel_engine_pm.h
@@ -7,16 +7,22 @@
7#ifndef INTEL_ENGINE_PM_H 7#ifndef INTEL_ENGINE_PM_H
8#define INTEL_ENGINE_PM_H 8#define INTEL_ENGINE_PM_H
9 9
10#include "intel_engine_types.h"
11#include "intel_wakeref.h"
12
10struct drm_i915_private; 13struct drm_i915_private;
11struct intel_engine_cs;
12 14
13void intel_engine_pm_get(struct intel_engine_cs *engine); 15void intel_engine_pm_get(struct intel_engine_cs *engine);
14void intel_engine_pm_put(struct intel_engine_cs *engine); 16void intel_engine_pm_put(struct intel_engine_cs *engine);
15 17
18static inline bool
19intel_engine_pm_get_if_awake(struct intel_engine_cs *engine)
20{
21 return intel_wakeref_get_if_active(&engine->wakeref);
22}
23
16void intel_engine_park(struct intel_engine_cs *engine); 24void intel_engine_park(struct intel_engine_cs *engine);
17 25
18void intel_engine_init__pm(struct intel_engine_cs *engine); 26void intel_engine_init__pm(struct intel_engine_cs *engine);
19 27
20int intel_engines_resume(struct drm_i915_private *i915);
21
22#endif /* INTEL_ENGINE_PM_H */ 28#endif /* INTEL_ENGINE_PM_H */
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_types.h b/drivers/gpu/drm/i915/gt/intel_engine_types.h
index 868b220214f8..43e975a26016 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_types.h
+++ b/drivers/gpu/drm/i915/gt/intel_engine_types.h
@@ -70,6 +70,18 @@ struct intel_ring {
70 struct list_head request_list; 70 struct list_head request_list;
71 struct list_head active_link; 71 struct list_head active_link;
72 72
73 /*
74 * As we have two types of rings, one global to the engine used
75 * by ringbuffer submission and those that are exclusive to a
76 * context used by execlists, we have to play safe and allow
77 * atomic updates to the pin_count. However, the actual pinning
78 * of the context is either done during initialisation for
79 * ringbuffer submission or serialised as part of the context
80 * pinning for execlists, and so we do not need a mutex ourselves
81 * to serialise intel_ring_pin/intel_ring_unpin.
82 */
83 atomic_t pin_count;
84
73 u32 head; 85 u32 head;
74 u32 tail; 86 u32 tail;
75 u32 emit; 87 u32 emit;
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_pm.c b/drivers/gpu/drm/i915/gt/intel_gt_pm.c
index 7b5967751762..9f8f7f54191f 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_pm.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt_pm.c
@@ -5,6 +5,7 @@
5 */ 5 */
6 6
7#include "i915_drv.h" 7#include "i915_drv.h"
8#include "intel_engine_pm.h"
8#include "intel_gt_pm.h" 9#include "intel_gt_pm.h"
9#include "intel_pm.h" 10#include "intel_pm.h"
10#include "intel_wakeref.h" 11#include "intel_wakeref.h"
@@ -118,10 +119,11 @@ void intel_gt_sanitize(struct drm_i915_private *i915, bool force)
118 intel_engine_reset(engine, false); 119 intel_engine_reset(engine, false);
119} 120}
120 121
121void intel_gt_resume(struct drm_i915_private *i915) 122int intel_gt_resume(struct drm_i915_private *i915)
122{ 123{
123 struct intel_engine_cs *engine; 124 struct intel_engine_cs *engine;
124 enum intel_engine_id id; 125 enum intel_engine_id id;
126 int err = 0;
125 127
126 /* 128 /*
127 * After resume, we may need to poke into the pinned kernel 129 * After resume, we may need to poke into the pinned kernel
@@ -129,9 +131,12 @@ void intel_gt_resume(struct drm_i915_private *i915)
129 * Only the kernel contexts should remain pinned over suspend, 131 * Only the kernel contexts should remain pinned over suspend,
130 * allowing us to fixup the user contexts on their first pin. 132 * allowing us to fixup the user contexts on their first pin.
131 */ 133 */
134 intel_gt_pm_get(i915);
132 for_each_engine(engine, i915, id) { 135 for_each_engine(engine, i915, id) {
133 struct intel_context *ce; 136 struct intel_context *ce;
134 137
138 intel_engine_pm_get(engine);
139
135 ce = engine->kernel_context; 140 ce = engine->kernel_context;
136 if (ce) 141 if (ce)
137 ce->ops->reset(ce); 142 ce->ops->reset(ce);
@@ -139,5 +144,19 @@ void intel_gt_resume(struct drm_i915_private *i915)
139 ce = engine->preempt_context; 144 ce = engine->preempt_context;
140 if (ce) 145 if (ce)
141 ce->ops->reset(ce); 146 ce->ops->reset(ce);
147
148 engine->serial++; /* kernel context lost */
149 err = engine->resume(engine);
150
151 intel_engine_pm_put(engine);
152 if (err) {
153 dev_err(i915->drm.dev,
154 "Failed to restart %s (%d)\n",
155 engine->name, err);
156 break;
157 }
142 } 158 }
159 intel_gt_pm_put(i915);
160
161 return err;
143} 162}
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_pm.h b/drivers/gpu/drm/i915/gt/intel_gt_pm.h
index 7dd1130a19a4..53f342b20181 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_pm.h
+++ b/drivers/gpu/drm/i915/gt/intel_gt_pm.h
@@ -22,6 +22,6 @@ void intel_gt_pm_put(struct drm_i915_private *i915);
22void intel_gt_pm_init(struct drm_i915_private *i915); 22void intel_gt_pm_init(struct drm_i915_private *i915);
23 23
24void intel_gt_sanitize(struct drm_i915_private *i915, bool force); 24void intel_gt_sanitize(struct drm_i915_private *i915, bool force);
25void intel_gt_resume(struct drm_i915_private *i915); 25int intel_gt_resume(struct drm_i915_private *i915);
26 26
27#endif /* INTEL_GT_PM_H */ 27#endif /* INTEL_GT_PM_H */
diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.c b/drivers/gpu/drm/i915/gt/intel_lrc.c
index b42b5f158295..82b7ace62d97 100644
--- a/drivers/gpu/drm/i915/gt/intel_lrc.c
+++ b/drivers/gpu/drm/i915/gt/intel_lrc.c
@@ -1414,6 +1414,7 @@ static void execlists_context_destroy(struct kref *kref)
1414{ 1414{
1415 struct intel_context *ce = container_of(kref, typeof(*ce), ref); 1415 struct intel_context *ce = container_of(kref, typeof(*ce), ref);
1416 1416
1417 GEM_BUG_ON(!i915_active_is_idle(&ce->active));
1417 GEM_BUG_ON(intel_context_is_pinned(ce)); 1418 GEM_BUG_ON(intel_context_is_pinned(ce));
1418 1419
1419 if (ce->state) 1420 if (ce->state)
@@ -1426,7 +1427,6 @@ static void execlists_context_unpin(struct intel_context *ce)
1426{ 1427{
1427 i915_gem_context_unpin_hw_id(ce->gem_context); 1428 i915_gem_context_unpin_hw_id(ce->gem_context);
1428 i915_gem_object_unpin_map(ce->state->obj); 1429 i915_gem_object_unpin_map(ce->state->obj);
1429 intel_ring_unpin(ce->ring);
1430} 1430}
1431 1431
1432static void 1432static void
@@ -1478,13 +1478,9 @@ __execlists_context_pin(struct intel_context *ce,
1478 goto unpin_active; 1478 goto unpin_active;
1479 } 1479 }
1480 1480
1481 ret = intel_ring_pin(ce->ring);
1482 if (ret)
1483 goto unpin_map;
1484
1485 ret = i915_gem_context_pin_hw_id(ce->gem_context); 1481 ret = i915_gem_context_pin_hw_id(ce->gem_context);
1486 if (ret) 1482 if (ret)
1487 goto unpin_ring; 1483 goto unpin_map;
1488 1484
1489 ce->lrc_desc = lrc_descriptor(ce, engine); 1485 ce->lrc_desc = lrc_descriptor(ce, engine);
1490 ce->lrc_reg_state = vaddr + LRC_STATE_PN * PAGE_SIZE; 1486 ce->lrc_reg_state = vaddr + LRC_STATE_PN * PAGE_SIZE;
@@ -1492,8 +1488,6 @@ __execlists_context_pin(struct intel_context *ce,
1492 1488
1493 return 0; 1489 return 0;
1494 1490
1495unpin_ring:
1496 intel_ring_unpin(ce->ring);
1497unpin_map: 1491unpin_map:
1498 i915_gem_object_unpin_map(ce->state->obj); 1492 i915_gem_object_unpin_map(ce->state->obj);
1499unpin_active: 1493unpin_active:
diff --git a/drivers/gpu/drm/i915/gt/intel_reset.c b/drivers/gpu/drm/i915/gt/intel_reset.c
index 4c478b38e420..3f907701ef4d 100644
--- a/drivers/gpu/drm/i915/gt/intel_reset.c
+++ b/drivers/gpu/drm/i915/gt/intel_reset.c
@@ -687,7 +687,6 @@ static void reset_prepare_engine(struct intel_engine_cs *engine)
687 * written to the powercontext is undefined and so we may lose 687 * written to the powercontext is undefined and so we may lose
688 * GPU state upon resume, i.e. fail to restart after a reset. 688 * GPU state upon resume, i.e. fail to restart after a reset.
689 */ 689 */
690 intel_engine_pm_get(engine);
691 intel_uncore_forcewake_get(engine->uncore, FORCEWAKE_ALL); 690 intel_uncore_forcewake_get(engine->uncore, FORCEWAKE_ALL);
692 engine->reset.prepare(engine); 691 engine->reset.prepare(engine);
693} 692}
@@ -718,16 +717,21 @@ static void revoke_mmaps(struct drm_i915_private *i915)
718 } 717 }
719} 718}
720 719
721static void reset_prepare(struct drm_i915_private *i915) 720static intel_engine_mask_t reset_prepare(struct drm_i915_private *i915)
722{ 721{
723 struct intel_engine_cs *engine; 722 struct intel_engine_cs *engine;
723 intel_engine_mask_t awake = 0;
724 enum intel_engine_id id; 724 enum intel_engine_id id;
725 725
726 intel_gt_pm_get(i915); 726 for_each_engine(engine, i915, id) {
727 for_each_engine(engine, i915, id) 727 if (intel_engine_pm_get_if_awake(engine))
728 awake |= engine->mask;
728 reset_prepare_engine(engine); 729 reset_prepare_engine(engine);
730 }
729 731
730 intel_uc_reset_prepare(i915); 732 intel_uc_reset_prepare(i915);
733
734 return awake;
731} 735}
732 736
733static void gt_revoke(struct drm_i915_private *i915) 737static void gt_revoke(struct drm_i915_private *i915)
@@ -761,20 +765,22 @@ static int gt_reset(struct drm_i915_private *i915,
761static void reset_finish_engine(struct intel_engine_cs *engine) 765static void reset_finish_engine(struct intel_engine_cs *engine)
762{ 766{
763 engine->reset.finish(engine); 767 engine->reset.finish(engine);
764 intel_engine_pm_put(engine);
765 intel_uncore_forcewake_put(engine->uncore, FORCEWAKE_ALL); 768 intel_uncore_forcewake_put(engine->uncore, FORCEWAKE_ALL);
769
770 intel_engine_signal_breadcrumbs(engine);
766} 771}
767 772
768static void reset_finish(struct drm_i915_private *i915) 773static void reset_finish(struct drm_i915_private *i915,
774 intel_engine_mask_t awake)
769{ 775{
770 struct intel_engine_cs *engine; 776 struct intel_engine_cs *engine;
771 enum intel_engine_id id; 777 enum intel_engine_id id;
772 778
773 for_each_engine(engine, i915, id) { 779 for_each_engine(engine, i915, id) {
774 reset_finish_engine(engine); 780 reset_finish_engine(engine);
775 intel_engine_signal_breadcrumbs(engine); 781 if (awake & engine->mask)
782 intel_engine_pm_put(engine);
776 } 783 }
777 intel_gt_pm_put(i915);
778} 784}
779 785
780static void nop_submit_request(struct i915_request *request) 786static void nop_submit_request(struct i915_request *request)
@@ -798,6 +804,7 @@ static void __i915_gem_set_wedged(struct drm_i915_private *i915)
798{ 804{
799 struct i915_gpu_error *error = &i915->gpu_error; 805 struct i915_gpu_error *error = &i915->gpu_error;
800 struct intel_engine_cs *engine; 806 struct intel_engine_cs *engine;
807 intel_engine_mask_t awake;
801 enum intel_engine_id id; 808 enum intel_engine_id id;
802 809
803 if (test_bit(I915_WEDGED, &error->flags)) 810 if (test_bit(I915_WEDGED, &error->flags))
@@ -817,7 +824,7 @@ static void __i915_gem_set_wedged(struct drm_i915_private *i915)
817 * rolling the global seqno forward (since this would complete requests 824 * rolling the global seqno forward (since this would complete requests
818 * for which we haven't set the fence error to EIO yet). 825 * for which we haven't set the fence error to EIO yet).
819 */ 826 */
820 reset_prepare(i915); 827 awake = reset_prepare(i915);
821 828
822 /* Even if the GPU reset fails, it should still stop the engines */ 829 /* Even if the GPU reset fails, it should still stop the engines */
823 if (!INTEL_INFO(i915)->gpu_reset_clobbers_display) 830 if (!INTEL_INFO(i915)->gpu_reset_clobbers_display)
@@ -841,7 +848,7 @@ static void __i915_gem_set_wedged(struct drm_i915_private *i915)
841 for_each_engine(engine, i915, id) 848 for_each_engine(engine, i915, id)
842 engine->cancel_requests(engine); 849 engine->cancel_requests(engine);
843 850
844 reset_finish(i915); 851 reset_finish(i915, awake);
845 852
846 GEM_TRACE("end\n"); 853 GEM_TRACE("end\n");
847} 854}
@@ -951,6 +958,21 @@ static int do_reset(struct drm_i915_private *i915,
951 return gt_reset(i915, stalled_mask); 958 return gt_reset(i915, stalled_mask);
952} 959}
953 960
961static int resume(struct drm_i915_private *i915)
962{
963 struct intel_engine_cs *engine;
964 enum intel_engine_id id;
965 int ret;
966
967 for_each_engine(engine, i915, id) {
968 ret = engine->resume(engine);
969 if (ret)
970 return ret;
971 }
972
973 return 0;
974}
975
954/** 976/**
955 * i915_reset - reset chip after a hang 977 * i915_reset - reset chip after a hang
956 * @i915: #drm_i915_private to reset 978 * @i915: #drm_i915_private to reset
@@ -973,6 +995,7 @@ void i915_reset(struct drm_i915_private *i915,
973 const char *reason) 995 const char *reason)
974{ 996{
975 struct i915_gpu_error *error = &i915->gpu_error; 997 struct i915_gpu_error *error = &i915->gpu_error;
998 intel_engine_mask_t awake;
976 int ret; 999 int ret;
977 1000
978 GEM_TRACE("flags=%lx\n", error->flags); 1001 GEM_TRACE("flags=%lx\n", error->flags);
@@ -989,7 +1012,7 @@ void i915_reset(struct drm_i915_private *i915,
989 dev_notice(i915->drm.dev, "Resetting chip for %s\n", reason); 1012 dev_notice(i915->drm.dev, "Resetting chip for %s\n", reason);
990 error->reset_count++; 1013 error->reset_count++;
991 1014
992 reset_prepare(i915); 1015 awake = reset_prepare(i915);
993 1016
994 if (!intel_has_gpu_reset(i915)) { 1017 if (!intel_has_gpu_reset(i915)) {
995 if (i915_modparams.reset) 1018 if (i915_modparams.reset)
@@ -1024,13 +1047,17 @@ void i915_reset(struct drm_i915_private *i915,
1024 if (ret) { 1047 if (ret) {
1025 DRM_ERROR("Failed to initialise HW following reset (%d)\n", 1048 DRM_ERROR("Failed to initialise HW following reset (%d)\n",
1026 ret); 1049 ret);
1027 goto error; 1050 goto taint;
1028 } 1051 }
1029 1052
1053 ret = resume(i915);
1054 if (ret)
1055 goto taint;
1056
1030 i915_queue_hangcheck(i915); 1057 i915_queue_hangcheck(i915);
1031 1058
1032finish: 1059finish:
1033 reset_finish(i915); 1060 reset_finish(i915, awake);
1034unlock: 1061unlock:
1035 mutex_unlock(&error->wedge_mutex); 1062 mutex_unlock(&error->wedge_mutex);
1036 return; 1063 return;
@@ -1081,7 +1108,7 @@ int i915_reset_engine(struct intel_engine_cs *engine, const char *msg)
1081 GEM_TRACE("%s flags=%lx\n", engine->name, error->flags); 1108 GEM_TRACE("%s flags=%lx\n", engine->name, error->flags);
1082 GEM_BUG_ON(!test_bit(I915_RESET_ENGINE + engine->id, &error->flags)); 1109 GEM_BUG_ON(!test_bit(I915_RESET_ENGINE + engine->id, &error->flags));
1083 1110
1084 if (!intel_wakeref_active(&engine->wakeref)) 1111 if (!intel_engine_pm_get_if_awake(engine))
1085 return 0; 1112 return 0;
1086 1113
1087 reset_prepare_engine(engine); 1114 reset_prepare_engine(engine);
@@ -1116,12 +1143,11 @@ int i915_reset_engine(struct intel_engine_cs *engine, const char *msg)
1116 * process to program RING_MODE, HWSP and re-enable submission. 1143 * process to program RING_MODE, HWSP and re-enable submission.
1117 */ 1144 */
1118 ret = engine->resume(engine); 1145 ret = engine->resume(engine);
1119 if (ret)
1120 goto out;
1121 1146
1122out: 1147out:
1123 intel_engine_cancel_stop_cs(engine); 1148 intel_engine_cancel_stop_cs(engine);
1124 reset_finish_engine(engine); 1149 reset_finish_engine(engine);
1150 intel_engine_pm_put(engine);
1125 return ret; 1151 return ret;
1126} 1152}
1127 1153
diff --git a/drivers/gpu/drm/i915/gt/intel_ringbuffer.c b/drivers/gpu/drm/i915/gt/intel_ringbuffer.c
index c6023bc9452d..12010e798868 100644
--- a/drivers/gpu/drm/i915/gt/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/gt/intel_ringbuffer.c
@@ -1149,16 +1149,16 @@ i915_emit_bb_start(struct i915_request *rq,
1149int intel_ring_pin(struct intel_ring *ring) 1149int intel_ring_pin(struct intel_ring *ring)
1150{ 1150{
1151 struct i915_vma *vma = ring->vma; 1151 struct i915_vma *vma = ring->vma;
1152 enum i915_map_type map = i915_coherent_map_type(vma->vm->i915);
1153 unsigned int flags; 1152 unsigned int flags;
1154 void *addr; 1153 void *addr;
1155 int ret; 1154 int ret;
1156 1155
1157 GEM_BUG_ON(ring->vaddr); 1156 if (atomic_fetch_inc(&ring->pin_count))
1157 return 0;
1158 1158
1159 ret = i915_timeline_pin(ring->timeline); 1159 ret = i915_timeline_pin(ring->timeline);
1160 if (ret) 1160 if (ret)
1161 return ret; 1161 goto err_unpin;
1162 1162
1163 flags = PIN_GLOBAL; 1163 flags = PIN_GLOBAL;
1164 1164
@@ -1172,26 +1172,31 @@ int intel_ring_pin(struct intel_ring *ring)
1172 1172
1173 ret = i915_vma_pin(vma, 0, 0, flags); 1173 ret = i915_vma_pin(vma, 0, 0, flags);
1174 if (unlikely(ret)) 1174 if (unlikely(ret))
1175 goto unpin_timeline; 1175 goto err_timeline;
1176 1176
1177 if (i915_vma_is_map_and_fenceable(vma)) 1177 if (i915_vma_is_map_and_fenceable(vma))
1178 addr = (void __force *)i915_vma_pin_iomap(vma); 1178 addr = (void __force *)i915_vma_pin_iomap(vma);
1179 else 1179 else
1180 addr = i915_gem_object_pin_map(vma->obj, map); 1180 addr = i915_gem_object_pin_map(vma->obj,
1181 i915_coherent_map_type(vma->vm->i915));
1181 if (IS_ERR(addr)) { 1182 if (IS_ERR(addr)) {
1182 ret = PTR_ERR(addr); 1183 ret = PTR_ERR(addr);
1183 goto unpin_ring; 1184 goto err_ring;
1184 } 1185 }
1185 1186
1186 vma->obj->pin_global++; 1187 vma->obj->pin_global++;
1187 1188
1189 GEM_BUG_ON(ring->vaddr);
1188 ring->vaddr = addr; 1190 ring->vaddr = addr;
1191
1189 return 0; 1192 return 0;
1190 1193
1191unpin_ring: 1194err_ring:
1192 i915_vma_unpin(vma); 1195 i915_vma_unpin(vma);
1193unpin_timeline: 1196err_timeline:
1194 i915_timeline_unpin(ring->timeline); 1197 i915_timeline_unpin(ring->timeline);
1198err_unpin:
1199 atomic_dec(&ring->pin_count);
1195 return ret; 1200 return ret;
1196} 1201}
1197 1202
@@ -1207,16 +1212,19 @@ void intel_ring_reset(struct intel_ring *ring, u32 tail)
1207 1212
1208void intel_ring_unpin(struct intel_ring *ring) 1213void intel_ring_unpin(struct intel_ring *ring)
1209{ 1214{
1210 GEM_BUG_ON(!ring->vma); 1215 if (!atomic_dec_and_test(&ring->pin_count))
1211 GEM_BUG_ON(!ring->vaddr); 1216 return;
1212 1217
1213 /* Discard any unused bytes beyond that submitted to hw. */ 1218 /* Discard any unused bytes beyond that submitted to hw. */
1214 intel_ring_reset(ring, ring->tail); 1219 intel_ring_reset(ring, ring->tail);
1215 1220
1221 GEM_BUG_ON(!ring->vma);
1216 if (i915_vma_is_map_and_fenceable(ring->vma)) 1222 if (i915_vma_is_map_and_fenceable(ring->vma))
1217 i915_vma_unpin_iomap(ring->vma); 1223 i915_vma_unpin_iomap(ring->vma);
1218 else 1224 else
1219 i915_gem_object_unpin_map(ring->vma->obj); 1225 i915_gem_object_unpin_map(ring->vma->obj);
1226
1227 GEM_BUG_ON(!ring->vaddr);
1220 ring->vaddr = NULL; 1228 ring->vaddr = NULL;
1221 1229
1222 ring->vma->obj->pin_global--; 1230 ring->vma->obj->pin_global--;
@@ -2081,10 +2089,11 @@ static void ring_destroy(struct intel_engine_cs *engine)
2081 WARN_ON(INTEL_GEN(dev_priv) > 2 && 2089 WARN_ON(INTEL_GEN(dev_priv) > 2 &&
2082 (ENGINE_READ(engine, RING_MI_MODE) & MODE_IDLE) == 0); 2090 (ENGINE_READ(engine, RING_MI_MODE) & MODE_IDLE) == 0);
2083 2091
2092 intel_engine_cleanup_common(engine);
2093
2084 intel_ring_unpin(engine->buffer); 2094 intel_ring_unpin(engine->buffer);
2085 intel_ring_put(engine->buffer); 2095 intel_ring_put(engine->buffer);
2086 2096
2087 intel_engine_cleanup_common(engine);
2088 kfree(engine); 2097 kfree(engine);
2089} 2098}
2090 2099
diff --git a/drivers/gpu/drm/i915/gt/intel_workarounds.c b/drivers/gpu/drm/i915/gt/intel_workarounds.c
index 15e90fd2cfdc..98dfb086320f 100644
--- a/drivers/gpu/drm/i915/gt/intel_workarounds.c
+++ b/drivers/gpu/drm/i915/gt/intel_workarounds.c
@@ -1098,10 +1098,25 @@ static void glk_whitelist_build(struct intel_engine_cs *engine)
1098 1098
1099static void cfl_whitelist_build(struct intel_engine_cs *engine) 1099static void cfl_whitelist_build(struct intel_engine_cs *engine)
1100{ 1100{
1101 struct i915_wa_list *w = &engine->whitelist;
1102
1101 if (engine->class != RENDER_CLASS) 1103 if (engine->class != RENDER_CLASS)
1102 return; 1104 return;
1103 1105
1104 gen9_whitelist_build(&engine->whitelist); 1106 gen9_whitelist_build(w);
1107
1108 /*
1109 * WaAllowPMDepthAndInvocationCountAccessFromUMD:cfl,whl,cml,aml
1110 *
1111 * This covers 4 register which are next to one another :
1112 * - PS_INVOCATION_COUNT
1113 * - PS_INVOCATION_COUNT_UDW
1114 * - PS_DEPTH_COUNT
1115 * - PS_DEPTH_COUNT_UDW
1116 */
1117 whitelist_reg_ext(w, PS_INVOCATION_COUNT,
1118 RING_FORCE_TO_NONPRIV_RD |
1119 RING_FORCE_TO_NONPRIV_RANGE_4);
1105} 1120}
1106 1121
1107static void cnl_whitelist_build(struct intel_engine_cs *engine) 1122static void cnl_whitelist_build(struct intel_engine_cs *engine)
@@ -1129,6 +1144,19 @@ static void icl_whitelist_build(struct intel_engine_cs *engine)
1129 1144
1130 /* WaEnableStateCacheRedirectToCS:icl */ 1145 /* WaEnableStateCacheRedirectToCS:icl */
1131 whitelist_reg(w, GEN9_SLICE_COMMON_ECO_CHICKEN1); 1146 whitelist_reg(w, GEN9_SLICE_COMMON_ECO_CHICKEN1);
1147
1148 /*
1149 * WaAllowPMDepthAndInvocationCountAccessFromUMD:icl
1150 *
1151 * This covers 4 register which are next to one another :
1152 * - PS_INVOCATION_COUNT
1153 * - PS_INVOCATION_COUNT_UDW
1154 * - PS_DEPTH_COUNT
1155 * - PS_DEPTH_COUNT_UDW
1156 */
1157 whitelist_reg_ext(w, PS_INVOCATION_COUNT,
1158 RING_FORCE_TO_NONPRIV_RD |
1159 RING_FORCE_TO_NONPRIV_RANGE_4);
1132 break; 1160 break;
1133 1161
1134 case VIDEO_DECODE_CLASS: 1162 case VIDEO_DECODE_CLASS:
@@ -1258,8 +1286,12 @@ rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
1258 if (IS_ICL_REVID(i915, ICL_REVID_A0, ICL_REVID_B0)) 1286 if (IS_ICL_REVID(i915, ICL_REVID_A0, ICL_REVID_B0))
1259 wa_write_or(wal, 1287 wa_write_or(wal,
1260 GEN7_SARCHKMD, 1288 GEN7_SARCHKMD,
1261 GEN7_DISABLE_DEMAND_PREFETCH | 1289 GEN7_DISABLE_DEMAND_PREFETCH);
1262 GEN7_DISABLE_SAMPLER_PREFETCH); 1290
1291 /* Wa_1606682166:icl */
1292 wa_write_or(wal,
1293 GEN7_SARCHKMD,
1294 GEN7_DISABLE_SAMPLER_PREFETCH);
1263 } 1295 }
1264 1296
1265 if (IS_GEN_RANGE(i915, 9, 11)) { 1297 if (IS_GEN_RANGE(i915, 9, 11)) {
diff --git a/drivers/gpu/drm/i915/gt/mock_engine.c b/drivers/gpu/drm/i915/gt/mock_engine.c
index 086801b51441..486c6953dcb1 100644
--- a/drivers/gpu/drm/i915/gt/mock_engine.c
+++ b/drivers/gpu/drm/i915/gt/mock_engine.c
@@ -66,6 +66,7 @@ static struct intel_ring *mock_ring(struct intel_engine_cs *engine)
66 ring->base.effective_size = sz; 66 ring->base.effective_size = sz;
67 ring->base.vaddr = (void *)(ring + 1); 67 ring->base.vaddr = (void *)(ring + 1);
68 ring->base.timeline = &ring->timeline; 68 ring->base.timeline = &ring->timeline;
69 atomic_set(&ring->base.pin_count, 1);
69 70
70 INIT_LIST_HEAD(&ring->base.request_list); 71 INIT_LIST_HEAD(&ring->base.request_list);
71 intel_ring_update_space(&ring->base); 72 intel_ring_update_space(&ring->base);
diff --git a/drivers/gpu/drm/i915/gt/selftest_reset.c b/drivers/gpu/drm/i915/gt/selftest_reset.c
index 89da9e7cc1ba..b5c590c9ccba 100644
--- a/drivers/gpu/drm/i915/gt/selftest_reset.c
+++ b/drivers/gpu/drm/i915/gt/selftest_reset.c
@@ -71,13 +71,16 @@ static int igt_atomic_reset(void *arg)
71 goto unlock; 71 goto unlock;
72 72
73 for (p = igt_atomic_phases; p->name; p++) { 73 for (p = igt_atomic_phases; p->name; p++) {
74 intel_engine_mask_t awake;
75
74 GEM_TRACE("intel_gpu_reset under %s\n", p->name); 76 GEM_TRACE("intel_gpu_reset under %s\n", p->name);
75 77
78 awake = reset_prepare(i915);
76 p->critical_section_begin(); 79 p->critical_section_begin();
77 reset_prepare(i915); 80 reset_prepare(i915);
78 err = intel_gpu_reset(i915, ALL_ENGINES); 81 err = intel_gpu_reset(i915, ALL_ENGINES);
79 reset_finish(i915);
80 p->critical_section_end(); 82 p->critical_section_end();
83 reset_finish(i915, awake);
81 84
82 if (err) { 85 if (err) {
83 pr_err("intel_gpu_reset failed under %s\n", p->name); 86 pr_err("intel_gpu_reset failed under %s\n", p->name);
diff --git a/drivers/gpu/drm/i915/gt/selftest_workarounds.c b/drivers/gpu/drm/i915/gt/selftest_workarounds.c
index 9eaf030affd0..44becd9538be 100644
--- a/drivers/gpu/drm/i915/gt/selftest_workarounds.c
+++ b/drivers/gpu/drm/i915/gt/selftest_workarounds.c
@@ -925,7 +925,12 @@ check_whitelisted_registers(struct intel_engine_cs *engine,
925 925
926 err = 0; 926 err = 0;
927 for (i = 0; i < engine->whitelist.count; i++) { 927 for (i = 0; i < engine->whitelist.count; i++) {
928 if (!fn(engine, a[i], b[i], engine->whitelist.list[i].reg)) 928 const struct i915_wa *wa = &engine->whitelist.list[i];
929
930 if (i915_mmio_reg_offset(wa->reg) & RING_FORCE_TO_NONPRIV_RD)
931 continue;
932
933 if (!fn(engine, a[i], b[i], wa->reg))
929 err = -EINVAL; 934 err = -EINVAL;
930 } 935 }
931 936
diff --git a/drivers/gpu/drm/i915/gvt/cmd_parser.c b/drivers/gpu/drm/i915/gvt/cmd_parser.c
index 6ea88270c818..b09dc315e2da 100644
--- a/drivers/gpu/drm/i915/gvt/cmd_parser.c
+++ b/drivers/gpu/drm/i915/gvt/cmd_parser.c
@@ -2674,11 +2674,6 @@ static int scan_workload(struct intel_vgpu_workload *workload)
2674 gma_head == gma_tail) 2674 gma_head == gma_tail)
2675 return 0; 2675 return 0;
2676 2676
2677 if (!intel_gvt_ggtt_validate_range(s.vgpu, s.ring_start, s.ring_size)) {
2678 ret = -EINVAL;
2679 goto out;
2680 }
2681
2682 ret = ip_gma_set(&s, gma_head); 2677 ret = ip_gma_set(&s, gma_head);
2683 if (ret) 2678 if (ret)
2684 goto out; 2679 goto out;
@@ -2724,11 +2719,6 @@ static int scan_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
2724 s.workload = workload; 2719 s.workload = workload;
2725 s.is_ctx_wa = true; 2720 s.is_ctx_wa = true;
2726 2721
2727 if (!intel_gvt_ggtt_validate_range(s.vgpu, s.ring_start, s.ring_size)) {
2728 ret = -EINVAL;
2729 goto out;
2730 }
2731
2732 ret = ip_gma_set(&s, gma_head); 2722 ret = ip_gma_set(&s, gma_head);
2733 if (ret) 2723 if (ret)
2734 goto out; 2724 goto out;
diff --git a/drivers/gpu/drm/i915/gvt/fb_decoder.c b/drivers/gpu/drm/i915/gvt/fb_decoder.c
index 65e847392aea..8bb292b01271 100644
--- a/drivers/gpu/drm/i915/gvt/fb_decoder.c
+++ b/drivers/gpu/drm/i915/gvt/fb_decoder.c
@@ -245,7 +245,7 @@ int intel_vgpu_decode_primary_plane(struct intel_vgpu *vgpu,
245 plane->hw_format = fmt; 245 plane->hw_format = fmt;
246 246
247 plane->base = vgpu_vreg_t(vgpu, DSPSURF(pipe)) & I915_GTT_PAGE_MASK; 247 plane->base = vgpu_vreg_t(vgpu, DSPSURF(pipe)) & I915_GTT_PAGE_MASK;
248 if (!intel_gvt_ggtt_validate_range(vgpu, plane->base, 0)) 248 if (!vgpu_gmadr_is_valid(vgpu, plane->base))
249 return -EINVAL; 249 return -EINVAL;
250 250
251 plane->base_gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm, plane->base); 251 plane->base_gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm, plane->base);
@@ -368,7 +368,7 @@ int intel_vgpu_decode_cursor_plane(struct intel_vgpu *vgpu,
368 alpha_plane, alpha_force); 368 alpha_plane, alpha_force);
369 369
370 plane->base = vgpu_vreg_t(vgpu, CURBASE(pipe)) & I915_GTT_PAGE_MASK; 370 plane->base = vgpu_vreg_t(vgpu, CURBASE(pipe)) & I915_GTT_PAGE_MASK;
371 if (!intel_gvt_ggtt_validate_range(vgpu, plane->base, 0)) 371 if (!vgpu_gmadr_is_valid(vgpu, plane->base))
372 return -EINVAL; 372 return -EINVAL;
373 373
374 plane->base_gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm, plane->base); 374 plane->base_gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm, plane->base);
@@ -472,7 +472,7 @@ int intel_vgpu_decode_sprite_plane(struct intel_vgpu *vgpu,
472 plane->drm_format = drm_format; 472 plane->drm_format = drm_format;
473 473
474 plane->base = vgpu_vreg_t(vgpu, SPRSURF(pipe)) & I915_GTT_PAGE_MASK; 474 plane->base = vgpu_vreg_t(vgpu, SPRSURF(pipe)) & I915_GTT_PAGE_MASK;
475 if (!intel_gvt_ggtt_validate_range(vgpu, plane->base, 0)) 475 if (!vgpu_gmadr_is_valid(vgpu, plane->base))
476 return -EINVAL; 476 return -EINVAL;
477 477
478 plane->base_gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm, plane->base); 478 plane->base_gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm, plane->base);
diff --git a/drivers/gpu/drm/i915/gvt/gtt.c b/drivers/gpu/drm/i915/gvt/gtt.c
index 53115bdae12b..4b04af569c05 100644
--- a/drivers/gpu/drm/i915/gvt/gtt.c
+++ b/drivers/gpu/drm/i915/gvt/gtt.c
@@ -2141,11 +2141,20 @@ static int emulate_ggtt_mmio_read(struct intel_vgpu *vgpu,
2141 struct intel_vgpu_mm *ggtt_mm = vgpu->gtt.ggtt_mm; 2141 struct intel_vgpu_mm *ggtt_mm = vgpu->gtt.ggtt_mm;
2142 const struct intel_gvt_device_info *info = &vgpu->gvt->device_info; 2142 const struct intel_gvt_device_info *info = &vgpu->gvt->device_info;
2143 unsigned long index = off >> info->gtt_entry_size_shift; 2143 unsigned long index = off >> info->gtt_entry_size_shift;
2144 unsigned long gma;
2144 struct intel_gvt_gtt_entry e; 2145 struct intel_gvt_gtt_entry e;
2145 2146
2146 if (bytes != 4 && bytes != 8) 2147 if (bytes != 4 && bytes != 8)
2147 return -EINVAL; 2148 return -EINVAL;
2148 2149
2150 gma = index << I915_GTT_PAGE_SHIFT;
2151 if (!intel_gvt_ggtt_validate_range(vgpu,
2152 gma, 1 << I915_GTT_PAGE_SHIFT)) {
2153 gvt_dbg_mm("read invalid ggtt at 0x%lx\n", gma);
2154 memset(p_data, 0, bytes);
2155 return 0;
2156 }
2157
2149 ggtt_get_guest_entry(ggtt_mm, &e, index); 2158 ggtt_get_guest_entry(ggtt_mm, &e, index);
2150 memcpy(p_data, (void *)&e.val64 + (off & (info->gtt_entry_size - 1)), 2159 memcpy(p_data, (void *)&e.val64 + (off & (info->gtt_entry_size - 1)),
2151 bytes); 2160 bytes);
diff --git a/drivers/gpu/drm/i915/gvt/kvmgt.c b/drivers/gpu/drm/i915/gvt/kvmgt.c
index 144301b778df..23aa3e50cbf8 100644
--- a/drivers/gpu/drm/i915/gvt/kvmgt.c
+++ b/drivers/gpu/drm/i915/gvt/kvmgt.c
@@ -1911,6 +1911,18 @@ static int kvmgt_dma_map_guest_page(unsigned long handle, unsigned long gfn,
1911 ret = __gvt_cache_add(info->vgpu, gfn, *dma_addr, size); 1911 ret = __gvt_cache_add(info->vgpu, gfn, *dma_addr, size);
1912 if (ret) 1912 if (ret)
1913 goto err_unmap; 1913 goto err_unmap;
1914 } else if (entry->size != size) {
1915 /* the same gfn with different size: unmap and re-map */
1916 gvt_dma_unmap_page(vgpu, gfn, entry->dma_addr, entry->size);
1917 __gvt_cache_remove_entry(vgpu, entry);
1918
1919 ret = gvt_dma_map_page(vgpu, gfn, dma_addr, size);
1920 if (ret)
1921 goto err_unlock;
1922
1923 ret = __gvt_cache_add(info->vgpu, gfn, *dma_addr, size);
1924 if (ret)
1925 goto err_unmap;
1914 } else { 1926 } else {
1915 kref_get(&entry->ref); 1927 kref_get(&entry->ref);
1916 *dma_addr = entry->dma_addr; 1928 *dma_addr = entry->dma_addr;
diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c
index 2144fb46d0e1..75baff657e43 100644
--- a/drivers/gpu/drm/i915/gvt/scheduler.c
+++ b/drivers/gpu/drm/i915/gvt/scheduler.c
@@ -364,16 +364,13 @@ static void release_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
364 wa_ctx->indirect_ctx.shadow_va = NULL; 364 wa_ctx->indirect_ctx.shadow_va = NULL;
365} 365}
366 366
367static int set_context_ppgtt_from_shadow(struct intel_vgpu_workload *workload, 367static void set_context_ppgtt_from_shadow(struct intel_vgpu_workload *workload,
368 struct i915_gem_context *ctx) 368 struct i915_gem_context *ctx)
369{ 369{
370 struct intel_vgpu_mm *mm = workload->shadow_mm; 370 struct intel_vgpu_mm *mm = workload->shadow_mm;
371 struct i915_ppgtt *ppgtt = i915_vm_to_ppgtt(ctx->vm); 371 struct i915_ppgtt *ppgtt = i915_vm_to_ppgtt(ctx->vm);
372 int i = 0; 372 int i = 0;
373 373
374 if (mm->type != INTEL_GVT_MM_PPGTT || !mm->ppgtt_mm.shadowed)
375 return -EINVAL;
376
377 if (mm->ppgtt_mm.root_entry_type == GTT_TYPE_PPGTT_ROOT_L4_ENTRY) { 374 if (mm->ppgtt_mm.root_entry_type == GTT_TYPE_PPGTT_ROOT_L4_ENTRY) {
378 px_dma(ppgtt->pd) = mm->ppgtt_mm.shadow_pdps[0]; 375 px_dma(ppgtt->pd) = mm->ppgtt_mm.shadow_pdps[0];
379 } else { 376 } else {
@@ -384,8 +381,6 @@ static int set_context_ppgtt_from_shadow(struct intel_vgpu_workload *workload,
384 px_dma(pd) = mm->ppgtt_mm.shadow_pdps[i]; 381 px_dma(pd) = mm->ppgtt_mm.shadow_pdps[i];
385 } 382 }
386 } 383 }
387
388 return 0;
389} 384}
390 385
391static int 386static int
@@ -614,6 +609,8 @@ static void release_shadow_batch_buffer(struct intel_vgpu_workload *workload)
614static int prepare_workload(struct intel_vgpu_workload *workload) 609static int prepare_workload(struct intel_vgpu_workload *workload)
615{ 610{
616 struct intel_vgpu *vgpu = workload->vgpu; 611 struct intel_vgpu *vgpu = workload->vgpu;
612 struct intel_vgpu_submission *s = &vgpu->submission;
613 int ring = workload->ring_id;
617 int ret = 0; 614 int ret = 0;
618 615
619 ret = intel_vgpu_pin_mm(workload->shadow_mm); 616 ret = intel_vgpu_pin_mm(workload->shadow_mm);
@@ -622,8 +619,16 @@ static int prepare_workload(struct intel_vgpu_workload *workload)
622 return ret; 619 return ret;
623 } 620 }
624 621
622 if (workload->shadow_mm->type != INTEL_GVT_MM_PPGTT ||
623 !workload->shadow_mm->ppgtt_mm.shadowed) {
624 gvt_vgpu_err("workload shadow ppgtt isn't ready\n");
625 return -EINVAL;
626 }
627
625 update_shadow_pdps(workload); 628 update_shadow_pdps(workload);
626 629
630 set_context_ppgtt_from_shadow(workload, s->shadow[ring]->gem_context);
631
627 ret = intel_vgpu_sync_oos_pages(workload->vgpu); 632 ret = intel_vgpu_sync_oos_pages(workload->vgpu);
628 if (ret) { 633 if (ret) {
629 gvt_vgpu_err("fail to vgpu sync oos pages\n"); 634 gvt_vgpu_err("fail to vgpu sync oos pages\n");
@@ -674,7 +679,6 @@ static int dispatch_workload(struct intel_vgpu_workload *workload)
674{ 679{
675 struct intel_vgpu *vgpu = workload->vgpu; 680 struct intel_vgpu *vgpu = workload->vgpu;
676 struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv; 681 struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
677 struct intel_vgpu_submission *s = &vgpu->submission;
678 struct i915_request *rq; 682 struct i915_request *rq;
679 int ring_id = workload->ring_id; 683 int ring_id = workload->ring_id;
680 int ret; 684 int ret;
@@ -685,13 +689,6 @@ static int dispatch_workload(struct intel_vgpu_workload *workload)
685 mutex_lock(&vgpu->vgpu_lock); 689 mutex_lock(&vgpu->vgpu_lock);
686 mutex_lock(&dev_priv->drm.struct_mutex); 690 mutex_lock(&dev_priv->drm.struct_mutex);
687 691
688 ret = set_context_ppgtt_from_shadow(workload,
689 s->shadow[ring_id]->gem_context);
690 if (ret < 0) {
691 gvt_vgpu_err("workload shadow ppgtt isn't ready\n");
692 goto err_req;
693 }
694
695 ret = intel_gvt_workload_req_alloc(workload); 692 ret = intel_gvt_workload_req_alloc(workload);
696 if (ret) 693 if (ret)
697 goto err_req; 694 goto err_req;
@@ -990,6 +987,7 @@ static int workload_thread(void *priv)
990 int ret; 987 int ret;
991 bool need_force_wake = (INTEL_GEN(gvt->dev_priv) >= 9); 988 bool need_force_wake = (INTEL_GEN(gvt->dev_priv) >= 9);
992 DEFINE_WAIT_FUNC(wait, woken_wake_function); 989 DEFINE_WAIT_FUNC(wait, woken_wake_function);
990 struct intel_runtime_pm *rpm = &gvt->dev_priv->runtime_pm;
993 991
994 kfree(p); 992 kfree(p);
995 993
@@ -1013,6 +1011,8 @@ static int workload_thread(void *priv)
1013 workload->ring_id, workload, 1011 workload->ring_id, workload,
1014 workload->vgpu->id); 1012 workload->vgpu->id);
1015 1013
1014 intel_runtime_pm_get(rpm);
1015
1016 gvt_dbg_sched("ring id %d will dispatch workload %p\n", 1016 gvt_dbg_sched("ring id %d will dispatch workload %p\n",
1017 workload->ring_id, workload); 1017 workload->ring_id, workload);
1018 1018
@@ -1042,6 +1042,7 @@ complete:
1042 intel_uncore_forcewake_put(&gvt->dev_priv->uncore, 1042 intel_uncore_forcewake_put(&gvt->dev_priv->uncore,
1043 FORCEWAKE_ALL); 1043 FORCEWAKE_ALL);
1044 1044
1045 intel_runtime_pm_put_unchecked(rpm);
1045 if (ret && (vgpu_is_vm_unhealthy(ret))) 1046 if (ret && (vgpu_is_vm_unhealthy(ret)))
1046 enter_failsafe_mode(vgpu, GVT_FAILSAFE_GUEST_ERR); 1047 enter_failsafe_mode(vgpu, GVT_FAILSAFE_GUEST_ERR);
1047 } 1048 }
@@ -1492,6 +1493,12 @@ intel_vgpu_create_workload(struct intel_vgpu *vgpu, int ring_id,
1492 intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa + 1493 intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
1493 RING_CTX_OFF(ctx_ctrl.val), &ctx_ctl, 4); 1494 RING_CTX_OFF(ctx_ctrl.val), &ctx_ctl, 4);
1494 1495
1496 if (!intel_gvt_ggtt_validate_range(vgpu, start,
1497 _RING_CTL_BUF_SIZE(ctl))) {
1498 gvt_vgpu_err("context contain invalid rb at: 0x%x\n", start);
1499 return ERR_PTR(-EINVAL);
1500 }
1501
1495 workload = alloc_workload(vgpu); 1502 workload = alloc_workload(vgpu);
1496 if (IS_ERR(workload)) 1503 if (IS_ERR(workload))
1497 return workload; 1504 return workload;
@@ -1516,9 +1523,31 @@ intel_vgpu_create_workload(struct intel_vgpu *vgpu, int ring_id,
1516 workload->wa_ctx.indirect_ctx.size = 1523 workload->wa_ctx.indirect_ctx.size =
1517 (indirect_ctx & INDIRECT_CTX_SIZE_MASK) * 1524 (indirect_ctx & INDIRECT_CTX_SIZE_MASK) *
1518 CACHELINE_BYTES; 1525 CACHELINE_BYTES;
1526
1527 if (workload->wa_ctx.indirect_ctx.size != 0) {
1528 if (!intel_gvt_ggtt_validate_range(vgpu,
1529 workload->wa_ctx.indirect_ctx.guest_gma,
1530 workload->wa_ctx.indirect_ctx.size)) {
1531 gvt_vgpu_err("invalid wa_ctx at: 0x%lx\n",
1532 workload->wa_ctx.indirect_ctx.guest_gma);
1533 kmem_cache_free(s->workloads, workload);
1534 return ERR_PTR(-EINVAL);
1535 }
1536 }
1537
1519 workload->wa_ctx.per_ctx.guest_gma = 1538 workload->wa_ctx.per_ctx.guest_gma =
1520 per_ctx & PER_CTX_ADDR_MASK; 1539 per_ctx & PER_CTX_ADDR_MASK;
1521 workload->wa_ctx.per_ctx.valid = per_ctx & 1; 1540 workload->wa_ctx.per_ctx.valid = per_ctx & 1;
1541 if (workload->wa_ctx.per_ctx.valid) {
1542 if (!intel_gvt_ggtt_validate_range(vgpu,
1543 workload->wa_ctx.per_ctx.guest_gma,
1544 CACHELINE_BYTES)) {
1545 gvt_vgpu_err("invalid per_ctx at: 0x%lx\n",
1546 workload->wa_ctx.per_ctx.guest_gma);
1547 kmem_cache_free(s->workloads, workload);
1548 return ERR_PTR(-EINVAL);
1549 }
1550 }
1522 } 1551 }
1523 1552
1524 gvt_dbg_el("workload %p ring id %d head %x tail %x start %x ctl %x\n", 1553 gvt_dbg_el("workload %p ring id %d head %x tail %x start %x ctl %x\n",
diff --git a/drivers/gpu/drm/i915/gvt/trace_points.c b/drivers/gpu/drm/i915/gvt/trace_points.c
index a3deed692b9c..fe552e877e09 100644
--- a/drivers/gpu/drm/i915/gvt/trace_points.c
+++ b/drivers/gpu/drm/i915/gvt/trace_points.c
@@ -28,8 +28,6 @@
28 * 28 *
29 */ 29 */
30 30
31#include "trace.h"
32
33#ifndef __CHECKER__ 31#ifndef __CHECKER__
34#define CREATE_TRACE_POINTS 32#define CREATE_TRACE_POINTS
35#include "trace.h" 33#include "trace.h"
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index f62e3397d936..bac1ee94f63f 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -1598,6 +1598,12 @@ static int i915_driver_init_hw(struct drm_i915_private *dev_priv)
1598 1598
1599 pci_set_master(pdev); 1599 pci_set_master(pdev);
1600 1600
1601 /*
1602 * We don't have a max segment size, so set it to the max so sg's
1603 * debugging layer doesn't complain
1604 */
1605 dma_set_max_seg_size(&pdev->dev, UINT_MAX);
1606
1601 /* overlay on gen2 is broken and can't address above 1G */ 1607 /* overlay on gen2 is broken and can't address above 1G */
1602 if (IS_GEN(dev_priv, 2)) { 1608 if (IS_GEN(dev_priv, 2)) {
1603 ret = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(30)); 1609 ret = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(30));
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index bc909ec5d9c3..fe7a6ec2c199 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -1674,8 +1674,9 @@ struct drm_i915_private {
1674 } dram_info; 1674 } dram_info;
1675 1675
1676 struct intel_bw_info { 1676 struct intel_bw_info {
1677 int num_planes; 1677 unsigned int deratedbw[3]; /* for each QGV point */
1678 int deratedbw[3]; 1678 u8 num_qgv_points;
1679 u8 num_planes;
1679 } max_bw[6]; 1680 } max_bw[6];
1680 1681
1681 struct drm_private_obj bw_obj; 1682 struct drm_private_obj bw_obj;
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 190ad54fb072..8a659d3d7435 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -46,7 +46,6 @@
46#include "gem/i915_gem_ioctls.h" 46#include "gem/i915_gem_ioctls.h"
47#include "gem/i915_gem_pm.h" 47#include "gem/i915_gem_pm.h"
48#include "gem/i915_gemfs.h" 48#include "gem/i915_gemfs.h"
49#include "gt/intel_engine_pm.h"
50#include "gt/intel_gt_pm.h" 49#include "gt/intel_gt_pm.h"
51#include "gt/intel_mocs.h" 50#include "gt/intel_mocs.h"
52#include "gt/intel_reset.h" 51#include "gt/intel_reset.h"
@@ -1307,21 +1306,13 @@ int i915_gem_init_hw(struct drm_i915_private *dev_priv)
1307 1306
1308 intel_mocs_init_l3cc_table(dev_priv); 1307 intel_mocs_init_l3cc_table(dev_priv);
1309 1308
1310 /* Only when the HW is re-initialised, can we replay the requests */
1311 ret = intel_engines_resume(dev_priv);
1312 if (ret)
1313 goto cleanup_uc;
1314
1315 intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL); 1309 intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
1316 1310
1317 intel_engines_set_scheduler_caps(dev_priv); 1311 intel_engines_set_scheduler_caps(dev_priv);
1318 return 0; 1312 return 0;
1319 1313
1320cleanup_uc:
1321 intel_uc_fini_hw(dev_priv);
1322out: 1314out:
1323 intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL); 1315 intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
1324
1325 return ret; 1316 return ret;
1326} 1317}
1327 1318
@@ -1580,6 +1571,11 @@ int i915_gem_init(struct drm_i915_private *dev_priv)
1580 if (ret) 1571 if (ret)
1581 goto err_uc_init; 1572 goto err_uc_init;
1582 1573
1574 /* Only when the HW is re-initialised, can we replay the requests */
1575 ret = intel_gt_resume(dev_priv);
1576 if (ret)
1577 goto err_init_hw;
1578
1583 /* 1579 /*
1584 * Despite its name intel_init_clock_gating applies both display 1580 * Despite its name intel_init_clock_gating applies both display
1585 * clock gating workarounds; GT mmio workarounds and the occasional 1581 * clock gating workarounds; GT mmio workarounds and the occasional
@@ -1593,20 +1589,20 @@ int i915_gem_init(struct drm_i915_private *dev_priv)
1593 1589
1594 ret = intel_engines_verify_workarounds(dev_priv); 1590 ret = intel_engines_verify_workarounds(dev_priv);
1595 if (ret) 1591 if (ret)
1596 goto err_init_hw; 1592 goto err_gt;
1597 1593
1598 ret = __intel_engines_record_defaults(dev_priv); 1594 ret = __intel_engines_record_defaults(dev_priv);
1599 if (ret) 1595 if (ret)
1600 goto err_init_hw; 1596 goto err_gt;
1601 1597
1602 if (i915_inject_load_failure()) { 1598 if (i915_inject_load_failure()) {
1603 ret = -ENODEV; 1599 ret = -ENODEV;
1604 goto err_init_hw; 1600 goto err_gt;
1605 } 1601 }
1606 1602
1607 if (i915_inject_load_failure()) { 1603 if (i915_inject_load_failure()) {
1608 ret = -EIO; 1604 ret = -EIO;
1609 goto err_init_hw; 1605 goto err_gt;
1610 } 1606 }
1611 1607
1612 intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL); 1608 intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
@@ -1620,7 +1616,7 @@ int i915_gem_init(struct drm_i915_private *dev_priv)
1620 * HW as irrevisibly wedged, but keep enough state around that the 1616 * HW as irrevisibly wedged, but keep enough state around that the
1621 * driver doesn't explode during runtime. 1617 * driver doesn't explode during runtime.
1622 */ 1618 */
1623err_init_hw: 1619err_gt:
1624 mutex_unlock(&dev_priv->drm.struct_mutex); 1620 mutex_unlock(&dev_priv->drm.struct_mutex);
1625 1621
1626 i915_gem_set_wedged(dev_priv); 1622 i915_gem_set_wedged(dev_priv);
@@ -1630,6 +1626,7 @@ err_init_hw:
1630 i915_gem_drain_workqueue(dev_priv); 1626 i915_gem_drain_workqueue(dev_priv);
1631 1627
1632 mutex_lock(&dev_priv->drm.struct_mutex); 1628 mutex_lock(&dev_priv->drm.struct_mutex);
1629err_init_hw:
1633 intel_uc_fini_hw(dev_priv); 1630 intel_uc_fini_hw(dev_priv);
1634err_uc_init: 1631err_uc_init:
1635 intel_uc_fini(dev_priv); 1632 intel_uc_fini(dev_priv);
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 8ab820145ea6..7015a97b1097 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -1444,9 +1444,11 @@ unwind_pd:
1444 spin_lock(&pdp->lock); 1444 spin_lock(&pdp->lock);
1445 if (atomic_dec_and_test(&pd->used)) { 1445 if (atomic_dec_and_test(&pd->used)) {
1446 gen8_ppgtt_set_pdpe(pdp, vm->scratch_pd, pdpe); 1446 gen8_ppgtt_set_pdpe(pdp, vm->scratch_pd, pdpe);
1447 pdp->entry[pdpe] = vm->scratch_pd;
1447 GEM_BUG_ON(!atomic_read(&pdp->used)); 1448 GEM_BUG_ON(!atomic_read(&pdp->used));
1448 atomic_dec(&pdp->used); 1449 atomic_dec(&pdp->used);
1449 free_pd(vm, pd); 1450 GEM_BUG_ON(alloc);
1451 alloc = pd; /* defer the free to after the lock */
1450 } 1452 }
1451 spin_unlock(&pdp->lock); 1453 spin_unlock(&pdp->lock);
1452unwind: 1454unwind:
@@ -1515,7 +1517,9 @@ unwind_pdp:
1515 spin_lock(&pml4->lock); 1517 spin_lock(&pml4->lock);
1516 if (atomic_dec_and_test(&pdp->used)) { 1518 if (atomic_dec_and_test(&pdp->used)) {
1517 gen8_ppgtt_set_pml4e(pml4, vm->scratch_pdp, pml4e); 1519 gen8_ppgtt_set_pml4e(pml4, vm->scratch_pdp, pml4e);
1518 free_pd(vm, pdp); 1520 pml4->entry[pml4e] = vm->scratch_pdp;
1521 GEM_BUG_ON(alloc);
1522 alloc = pdp; /* defer the free until after the lock */
1519 } 1523 }
1520 spin_unlock(&pml4->lock); 1524 spin_unlock(&pml4->lock);
1521unwind: 1525unwind:
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
index b7e9fddef270..8bc76fcff70d 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.c
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -1194,6 +1194,7 @@ static void error_record_engine_registers(struct i915_gpu_state *error,
1194 switch (engine->id) { 1194 switch (engine->id) {
1195 default: 1195 default:
1196 MISSING_CASE(engine->id); 1196 MISSING_CASE(engine->id);
1197 /* fall through */
1197 case RCS0: 1198 case RCS0:
1198 mmio = RENDER_HWS_PGA_GEN7; 1199 mmio = RENDER_HWS_PGA_GEN7;
1199 break; 1200 break;
@@ -1417,6 +1418,7 @@ static void gem_record_rings(struct i915_gpu_state *error)
1417 struct intel_engine_cs *engine = i915->engine[i]; 1418 struct intel_engine_cs *engine = i915->engine[i];
1418 struct drm_i915_error_engine *ee = &error->engine[i]; 1419 struct drm_i915_error_engine *ee = &error->engine[i];
1419 struct i915_request *request; 1420 struct i915_request *request;
1421 unsigned long flags;
1420 1422
1421 ee->engine_id = -1; 1423 ee->engine_id = -1;
1422 1424
@@ -1428,10 +1430,11 @@ static void gem_record_rings(struct i915_gpu_state *error)
1428 error_record_engine_registers(error, engine, ee); 1430 error_record_engine_registers(error, engine, ee);
1429 error_record_engine_execlists(engine, ee); 1431 error_record_engine_execlists(engine, ee);
1430 1432
1433 spin_lock_irqsave(&engine->active.lock, flags);
1431 request = intel_engine_find_active_request(engine); 1434 request = intel_engine_find_active_request(engine);
1432 if (request) { 1435 if (request) {
1433 struct i915_gem_context *ctx = request->gem_context; 1436 struct i915_gem_context *ctx = request->gem_context;
1434 struct intel_ring *ring; 1437 struct intel_ring *ring = request->ring;
1435 1438
1436 ee->vm = ctx->vm ?: &ggtt->vm; 1439 ee->vm = ctx->vm ?: &ggtt->vm;
1437 1440
@@ -1461,7 +1464,6 @@ static void gem_record_rings(struct i915_gpu_state *error)
1461 ee->rq_post = request->postfix; 1464 ee->rq_post = request->postfix;
1462 ee->rq_tail = request->tail; 1465 ee->rq_tail = request->tail;
1463 1466
1464 ring = request->ring;
1465 ee->cpu_ring_head = ring->head; 1467 ee->cpu_ring_head = ring->head;
1466 ee->cpu_ring_tail = ring->tail; 1468 ee->cpu_ring_tail = ring->tail;
1467 ee->ringbuffer = 1469 ee->ringbuffer =
@@ -1469,6 +1471,7 @@ static void gem_record_rings(struct i915_gpu_state *error)
1469 1471
1470 engine_record_requests(engine, request, ee); 1472 engine_record_requests(engine, request, ee);
1471 } 1473 }
1474 spin_unlock_irqrestore(&engine->active.lock, flags);
1472 1475
1473 ee->hws_page = 1476 ee->hws_page =
1474 i915_error_object_create(i915, 1477 i915_error_object_create(i915,
diff --git a/drivers/gpu/drm/i915/i915_perf.c b/drivers/gpu/drm/i915/i915_perf.c
index a700c5c3d167..5140017f9a39 100644
--- a/drivers/gpu/drm/i915/i915_perf.c
+++ b/drivers/gpu/drm/i915/i915_perf.c
@@ -1567,28 +1567,10 @@ static void config_oa_regs(struct drm_i915_private *dev_priv,
1567 } 1567 }
1568} 1568}
1569 1569
1570static int hsw_enable_metric_set(struct i915_perf_stream *stream) 1570static void delay_after_mux(void)
1571{ 1571{
1572 struct drm_i915_private *dev_priv = stream->dev_priv; 1572 /*
1573 const struct i915_oa_config *oa_config = stream->oa_config; 1573 * It apparently takes a fairly long time for a new MUX
1574
1575 /* PRM:
1576 *
1577 * OA unit is using “crclk†for its functionality. When trunk
1578 * level clock gating takes place, OA clock would be gated,
1579 * unable to count the events from non-render clock domain.
1580 * Render clock gating must be disabled when OA is enabled to
1581 * count the events from non-render domain. Unit level clock
1582 * gating for RCS should also be disabled.
1583 */
1584 I915_WRITE(GEN7_MISCCPCTL, (I915_READ(GEN7_MISCCPCTL) &
1585 ~GEN7_DOP_CLOCK_GATE_ENABLE));
1586 I915_WRITE(GEN6_UCGCTL1, (I915_READ(GEN6_UCGCTL1) |
1587 GEN6_CSUNIT_CLOCK_GATE_DISABLE));
1588
1589 config_oa_regs(dev_priv, oa_config->mux_regs, oa_config->mux_regs_len);
1590
1591 /* It apparently takes a fairly long time for a new MUX
1592 * configuration to be be applied after these register writes. 1574 * configuration to be be applied after these register writes.
1593 * This delay duration was derived empirically based on the 1575 * This delay duration was derived empirically based on the
1594 * render_basic config but hopefully it covers the maximum 1576 * render_basic config but hopefully it covers the maximum
@@ -1610,6 +1592,30 @@ static int hsw_enable_metric_set(struct i915_perf_stream *stream)
1610 * a delay at this location would mitigate any invalid reports. 1592 * a delay at this location would mitigate any invalid reports.
1611 */ 1593 */
1612 usleep_range(15000, 20000); 1594 usleep_range(15000, 20000);
1595}
1596
1597static int hsw_enable_metric_set(struct i915_perf_stream *stream)
1598{
1599 struct drm_i915_private *dev_priv = stream->dev_priv;
1600 const struct i915_oa_config *oa_config = stream->oa_config;
1601
1602 /*
1603 * PRM:
1604 *
1605 * OA unit is using “crclk†for its functionality. When trunk
1606 * level clock gating takes place, OA clock would be gated,
1607 * unable to count the events from non-render clock domain.
1608 * Render clock gating must be disabled when OA is enabled to
1609 * count the events from non-render domain. Unit level clock
1610 * gating for RCS should also be disabled.
1611 */
1612 I915_WRITE(GEN7_MISCCPCTL, (I915_READ(GEN7_MISCCPCTL) &
1613 ~GEN7_DOP_CLOCK_GATE_ENABLE));
1614 I915_WRITE(GEN6_UCGCTL1, (I915_READ(GEN6_UCGCTL1) |
1615 GEN6_CSUNIT_CLOCK_GATE_DISABLE));
1616
1617 config_oa_regs(dev_priv, oa_config->mux_regs, oa_config->mux_regs_len);
1618 delay_after_mux();
1613 1619
1614 config_oa_regs(dev_priv, oa_config->b_counter_regs, 1620 config_oa_regs(dev_priv, oa_config->b_counter_regs,
1615 oa_config->b_counter_regs_len); 1621 oa_config->b_counter_regs_len);
@@ -1835,6 +1841,7 @@ static int gen8_enable_metric_set(struct i915_perf_stream *stream)
1835 return ret; 1841 return ret;
1836 1842
1837 config_oa_regs(dev_priv, oa_config->mux_regs, oa_config->mux_regs_len); 1843 config_oa_regs(dev_priv, oa_config->mux_regs, oa_config->mux_regs_len);
1844 delay_after_mux();
1838 1845
1839 config_oa_regs(dev_priv, oa_config->b_counter_regs, 1846 config_oa_regs(dev_priv, oa_config->b_counter_regs,
1840 oa_config->b_counter_regs_len); 1847 oa_config->b_counter_regs_len);
@@ -2515,6 +2522,9 @@ static int i915_perf_release(struct inode *inode, struct file *file)
2515 i915_perf_destroy_locked(stream); 2522 i915_perf_destroy_locked(stream);
2516 mutex_unlock(&dev_priv->perf.lock); 2523 mutex_unlock(&dev_priv->perf.lock);
2517 2524
2525 /* Release the reference the perf stream kept on the driver. */
2526 drm_dev_put(&dev_priv->drm);
2527
2518 return 0; 2528 return 0;
2519} 2529}
2520 2530
@@ -2650,6 +2660,11 @@ i915_perf_open_ioctl_locked(struct drm_i915_private *dev_priv,
2650 if (!(param->flags & I915_PERF_FLAG_DISABLED)) 2660 if (!(param->flags & I915_PERF_FLAG_DISABLED))
2651 i915_perf_enable_locked(stream); 2661 i915_perf_enable_locked(stream);
2652 2662
2663 /* Take a reference on the driver that will be kept with stream_fd
2664 * until its release.
2665 */
2666 drm_dev_get(&dev_priv->drm);
2667
2653 return stream_fd; 2668 return stream_fd;
2654 2669
2655err_open: 2670err_open:
@@ -3477,9 +3492,13 @@ void i915_perf_init(struct drm_i915_private *dev_priv)
3477 dev_priv->perf.oa.ops.enable_metric_set = gen8_enable_metric_set; 3492 dev_priv->perf.oa.ops.enable_metric_set = gen8_enable_metric_set;
3478 dev_priv->perf.oa.ops.disable_metric_set = gen10_disable_metric_set; 3493 dev_priv->perf.oa.ops.disable_metric_set = gen10_disable_metric_set;
3479 3494
3480 dev_priv->perf.oa.ctx_oactxctrl_offset = 0x128; 3495 if (IS_GEN(dev_priv, 10)) {
3481 dev_priv->perf.oa.ctx_flexeu0_offset = 0x3de; 3496 dev_priv->perf.oa.ctx_oactxctrl_offset = 0x128;
3482 3497 dev_priv->perf.oa.ctx_flexeu0_offset = 0x3de;
3498 } else {
3499 dev_priv->perf.oa.ctx_oactxctrl_offset = 0x124;
3500 dev_priv->perf.oa.ctx_flexeu0_offset = 0x78e;
3501 }
3483 dev_priv->perf.oa.gen8_valid_ctx_bit = (1<<16); 3502 dev_priv->perf.oa.gen8_valid_ctx_bit = (1<<16);
3484 } 3503 }
3485 } 3504 }
diff --git a/drivers/gpu/drm/i915/i915_trace.h b/drivers/gpu/drm/i915/i915_trace.h
index f4ce643b3bc3..cce426b23a24 100644
--- a/drivers/gpu/drm/i915/i915_trace.h
+++ b/drivers/gpu/drm/i915/i915_trace.h
@@ -21,24 +21,22 @@
21/* watermark/fifo updates */ 21/* watermark/fifo updates */
22 22
23TRACE_EVENT(intel_pipe_enable, 23TRACE_EVENT(intel_pipe_enable,
24 TP_PROTO(struct drm_i915_private *dev_priv, enum pipe pipe), 24 TP_PROTO(struct intel_crtc *crtc),
25 TP_ARGS(dev_priv, pipe), 25 TP_ARGS(crtc),
26 26
27 TP_STRUCT__entry( 27 TP_STRUCT__entry(
28 __array(u32, frame, 3) 28 __array(u32, frame, 3)
29 __array(u32, scanline, 3) 29 __array(u32, scanline, 3)
30 __field(enum pipe, pipe) 30 __field(enum pipe, pipe)
31 ), 31 ),
32
33 TP_fast_assign( 32 TP_fast_assign(
34 enum pipe _pipe; 33 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
35 for_each_pipe(dev_priv, _pipe) { 34 struct intel_crtc *it__;
36 __entry->frame[_pipe] = 35 for_each_intel_crtc(&dev_priv->drm, it__) {
37 dev_priv->drm.driver->get_vblank_counter(&dev_priv->drm, _pipe); 36 __entry->frame[it__->pipe] = intel_crtc_get_vblank_counter(it__);
38 __entry->scanline[_pipe] = 37 __entry->scanline[it__->pipe] = intel_get_crtc_scanline(it__);
39 intel_get_crtc_scanline(intel_get_crtc_for_pipe(dev_priv, _pipe));
40 } 38 }
41 __entry->pipe = pipe; 39 __entry->pipe = crtc->pipe;
42 ), 40 ),
43 41
44 TP_printk("pipe %c enable, pipe A: frame=%u, scanline=%u, pipe B: frame=%u, scanline=%u, pipe C: frame=%u, scanline=%u", 42 TP_printk("pipe %c enable, pipe A: frame=%u, scanline=%u, pipe B: frame=%u, scanline=%u, pipe C: frame=%u, scanline=%u",
@@ -49,8 +47,8 @@ TRACE_EVENT(intel_pipe_enable,
49); 47);
50 48
51TRACE_EVENT(intel_pipe_disable, 49TRACE_EVENT(intel_pipe_disable,
52 TP_PROTO(struct drm_i915_private *dev_priv, enum pipe pipe), 50 TP_PROTO(struct intel_crtc *crtc),
53 TP_ARGS(dev_priv, pipe), 51 TP_ARGS(crtc),
54 52
55 TP_STRUCT__entry( 53 TP_STRUCT__entry(
56 __array(u32, frame, 3) 54 __array(u32, frame, 3)
@@ -59,14 +57,13 @@ TRACE_EVENT(intel_pipe_disable,
59 ), 57 ),
60 58
61 TP_fast_assign( 59 TP_fast_assign(
62 enum pipe _pipe; 60 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
63 for_each_pipe(dev_priv, _pipe) { 61 struct intel_crtc *it__;
64 __entry->frame[_pipe] = 62 for_each_intel_crtc(&dev_priv->drm, it__) {
65 dev_priv->drm.driver->get_vblank_counter(&dev_priv->drm, _pipe); 63 __entry->frame[it__->pipe] = intel_crtc_get_vblank_counter(it__);
66 __entry->scanline[_pipe] = 64 __entry->scanline[it__->pipe] = intel_get_crtc_scanline(it__);
67 intel_get_crtc_scanline(intel_get_crtc_for_pipe(dev_priv, _pipe));
68 } 65 }
69 __entry->pipe = pipe; 66 __entry->pipe = crtc->pipe;
70 ), 67 ),
71 68
72 TP_printk("pipe %c disable, pipe A: frame=%u, scanline=%u, pipe B: frame=%u, scanline=%u, pipe C: frame=%u, scanline=%u", 69 TP_printk("pipe %c disable, pipe A: frame=%u, scanline=%u, pipe B: frame=%u, scanline=%u, pipe C: frame=%u, scanline=%u",
@@ -89,8 +86,7 @@ TRACE_EVENT(intel_pipe_crc,
89 86
90 TP_fast_assign( 87 TP_fast_assign(
91 __entry->pipe = crtc->pipe; 88 __entry->pipe = crtc->pipe;
92 __entry->frame = crtc->base.dev->driver->get_vblank_counter(crtc->base.dev, 89 __entry->frame = intel_crtc_get_vblank_counter(crtc);
93 crtc->pipe);
94 __entry->scanline = intel_get_crtc_scanline(crtc); 90 __entry->scanline = intel_get_crtc_scanline(crtc);
95 memcpy(__entry->crcs, crcs, sizeof(__entry->crcs)); 91 memcpy(__entry->crcs, crcs, sizeof(__entry->crcs));
96 ), 92 ),
@@ -112,9 +108,10 @@ TRACE_EVENT(intel_cpu_fifo_underrun,
112 ), 108 ),
113 109
114 TP_fast_assign( 110 TP_fast_assign(
111 struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
115 __entry->pipe = pipe; 112 __entry->pipe = pipe;
116 __entry->frame = dev_priv->drm.driver->get_vblank_counter(&dev_priv->drm, pipe); 113 __entry->frame = intel_crtc_get_vblank_counter(crtc);
117 __entry->scanline = intel_get_crtc_scanline(intel_get_crtc_for_pipe(dev_priv, pipe)); 114 __entry->scanline = intel_get_crtc_scanline(crtc);
118 ), 115 ),
119 116
120 TP_printk("pipe %c, frame=%u, scanline=%u", 117 TP_printk("pipe %c, frame=%u, scanline=%u",
@@ -134,9 +131,10 @@ TRACE_EVENT(intel_pch_fifo_underrun,
134 131
135 TP_fast_assign( 132 TP_fast_assign(
136 enum pipe pipe = pch_transcoder; 133 enum pipe pipe = pch_transcoder;
134 struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
137 __entry->pipe = pipe; 135 __entry->pipe = pipe;
138 __entry->frame = dev_priv->drm.driver->get_vblank_counter(&dev_priv->drm, pipe); 136 __entry->frame = intel_crtc_get_vblank_counter(crtc);
139 __entry->scanline = intel_get_crtc_scanline(intel_get_crtc_for_pipe(dev_priv, pipe)); 137 __entry->scanline = intel_get_crtc_scanline(crtc);
140 ), 138 ),
141 139
142 TP_printk("pch transcoder %c, frame=%u, scanline=%u", 140 TP_printk("pch transcoder %c, frame=%u, scanline=%u",
@@ -156,12 +154,10 @@ TRACE_EVENT(intel_memory_cxsr,
156 ), 154 ),
157 155
158 TP_fast_assign( 156 TP_fast_assign(
159 enum pipe pipe; 157 struct intel_crtc *crtc;
160 for_each_pipe(dev_priv, pipe) { 158 for_each_intel_crtc(&dev_priv->drm, crtc) {
161 __entry->frame[pipe] = 159 __entry->frame[crtc->pipe] = intel_crtc_get_vblank_counter(crtc);
162 dev_priv->drm.driver->get_vblank_counter(&dev_priv->drm, pipe); 160 __entry->scanline[crtc->pipe] = intel_get_crtc_scanline(crtc);
163 __entry->scanline[pipe] =
164 intel_get_crtc_scanline(intel_get_crtc_for_pipe(dev_priv, pipe));
165 } 161 }
166 __entry->old = old; 162 __entry->old = old;
167 __entry->new = new; 163 __entry->new = new;
@@ -198,8 +194,7 @@ TRACE_EVENT(g4x_wm,
198 194
199 TP_fast_assign( 195 TP_fast_assign(
200 __entry->pipe = crtc->pipe; 196 __entry->pipe = crtc->pipe;
201 __entry->frame = crtc->base.dev->driver->get_vblank_counter(crtc->base.dev, 197 __entry->frame = intel_crtc_get_vblank_counter(crtc);
202 crtc->pipe);
203 __entry->scanline = intel_get_crtc_scanline(crtc); 198 __entry->scanline = intel_get_crtc_scanline(crtc);
204 __entry->primary = wm->pipe[crtc->pipe].plane[PLANE_PRIMARY]; 199 __entry->primary = wm->pipe[crtc->pipe].plane[PLANE_PRIMARY];
205 __entry->sprite = wm->pipe[crtc->pipe].plane[PLANE_SPRITE0]; 200 __entry->sprite = wm->pipe[crtc->pipe].plane[PLANE_SPRITE0];
@@ -243,8 +238,7 @@ TRACE_EVENT(vlv_wm,
243 238
244 TP_fast_assign( 239 TP_fast_assign(
245 __entry->pipe = crtc->pipe; 240 __entry->pipe = crtc->pipe;
246 __entry->frame = crtc->base.dev->driver->get_vblank_counter(crtc->base.dev, 241 __entry->frame = intel_crtc_get_vblank_counter(crtc);
247 crtc->pipe);
248 __entry->scanline = intel_get_crtc_scanline(crtc); 242 __entry->scanline = intel_get_crtc_scanline(crtc);
249 __entry->level = wm->level; 243 __entry->level = wm->level;
250 __entry->cxsr = wm->cxsr; 244 __entry->cxsr = wm->cxsr;
@@ -278,8 +272,7 @@ TRACE_EVENT(vlv_fifo_size,
278 272
279 TP_fast_assign( 273 TP_fast_assign(
280 __entry->pipe = crtc->pipe; 274 __entry->pipe = crtc->pipe;
281 __entry->frame = crtc->base.dev->driver->get_vblank_counter(crtc->base.dev, 275 __entry->frame = intel_crtc_get_vblank_counter(crtc);
282 crtc->pipe);
283 __entry->scanline = intel_get_crtc_scanline(crtc); 276 __entry->scanline = intel_get_crtc_scanline(crtc);
284 __entry->sprite0_start = sprite0_start; 277 __entry->sprite0_start = sprite0_start;
285 __entry->sprite1_start = sprite1_start; 278 __entry->sprite1_start = sprite1_start;
@@ -310,8 +303,7 @@ TRACE_EVENT(intel_update_plane,
310 TP_fast_assign( 303 TP_fast_assign(
311 __entry->pipe = crtc->pipe; 304 __entry->pipe = crtc->pipe;
312 __entry->name = plane->name; 305 __entry->name = plane->name;
313 __entry->frame = crtc->base.dev->driver->get_vblank_counter(crtc->base.dev, 306 __entry->frame = intel_crtc_get_vblank_counter(crtc);
314 crtc->pipe);
315 __entry->scanline = intel_get_crtc_scanline(crtc); 307 __entry->scanline = intel_get_crtc_scanline(crtc);
316 memcpy(__entry->src, &plane->state->src, sizeof(__entry->src)); 308 memcpy(__entry->src, &plane->state->src, sizeof(__entry->src));
317 memcpy(__entry->dst, &plane->state->dst, sizeof(__entry->dst)); 309 memcpy(__entry->dst, &plane->state->dst, sizeof(__entry->dst));
@@ -338,8 +330,7 @@ TRACE_EVENT(intel_disable_plane,
338 TP_fast_assign( 330 TP_fast_assign(
339 __entry->pipe = crtc->pipe; 331 __entry->pipe = crtc->pipe;
340 __entry->name = plane->name; 332 __entry->name = plane->name;
341 __entry->frame = crtc->base.dev->driver->get_vblank_counter(crtc->base.dev, 333 __entry->frame = intel_crtc_get_vblank_counter(crtc);
342 crtc->pipe);
343 __entry->scanline = intel_get_crtc_scanline(crtc); 334 __entry->scanline = intel_get_crtc_scanline(crtc);
344 ), 335 ),
345 336
@@ -364,8 +355,7 @@ TRACE_EVENT(i915_pipe_update_start,
364 355
365 TP_fast_assign( 356 TP_fast_assign(
366 __entry->pipe = crtc->pipe; 357 __entry->pipe = crtc->pipe;
367 __entry->frame = crtc->base.dev->driver->get_vblank_counter(crtc->base.dev, 358 __entry->frame = intel_crtc_get_vblank_counter(crtc);
368 crtc->pipe);
369 __entry->scanline = intel_get_crtc_scanline(crtc); 359 __entry->scanline = intel_get_crtc_scanline(crtc);
370 __entry->min = crtc->debug.min_vbl; 360 __entry->min = crtc->debug.min_vbl;
371 __entry->max = crtc->debug.max_vbl; 361 __entry->max = crtc->debug.max_vbl;
diff --git a/drivers/gpu/drm/i915/i915_vgpu.c b/drivers/gpu/drm/i915/i915_vgpu.c
index 94d3992b599d..724627afdedc 100644
--- a/drivers/gpu/drm/i915/i915_vgpu.c
+++ b/drivers/gpu/drm/i915/i915_vgpu.c
@@ -101,6 +101,9 @@ static struct _balloon_info_ bl_info;
101static void vgt_deballoon_space(struct i915_ggtt *ggtt, 101static void vgt_deballoon_space(struct i915_ggtt *ggtt,
102 struct drm_mm_node *node) 102 struct drm_mm_node *node)
103{ 103{
104 if (!drm_mm_node_allocated(node))
105 return;
106
104 DRM_DEBUG_DRIVER("deballoon space: range [0x%llx - 0x%llx] %llu KiB.\n", 107 DRM_DEBUG_DRIVER("deballoon space: range [0x%llx - 0x%llx] %llu KiB.\n",
105 node->start, 108 node->start,
106 node->start + node->size, 109 node->start + node->size,
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index 1d58f7ec5d84..f11979879e7b 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -829,7 +829,7 @@ struct intel_crtc_state {
829 829
830 /* 830 /*
831 * Frequence the dpll for the port should run at. Differs from the 831 * Frequence the dpll for the port should run at. Differs from the
832 * adjusted dotclock e.g. for DP or 12bpc hdmi mode. This is also 832 * adjusted dotclock e.g. for DP or 10/12bpc hdmi mode. This is also
833 * already multiplied by pixel_multiplier. 833 * already multiplied by pixel_multiplier.
834 */ 834 */
835 int port_clock; 835 int port_clock;
diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.c b/drivers/gpu/drm/i915/intel_runtime_pm.c
index 502c54428570..8d1aebc3e857 100644
--- a/drivers/gpu/drm/i915/intel_runtime_pm.c
+++ b/drivers/gpu/drm/i915/intel_runtime_pm.c
@@ -221,13 +221,11 @@ __untrack_all_wakerefs(struct intel_runtime_pm_debug *debug,
221static void 221static void
222dump_and_free_wakeref_tracking(struct intel_runtime_pm_debug *debug) 222dump_and_free_wakeref_tracking(struct intel_runtime_pm_debug *debug)
223{ 223{
224 struct drm_printer p; 224 if (debug->count) {
225 struct drm_printer p = drm_debug_printer("i915");
225 226
226 if (!debug->count) 227 __print_intel_runtime_pm_wakeref(&p, debug);
227 return; 228 }
228
229 p = drm_debug_printer("i915");
230 __print_intel_runtime_pm_wakeref(&p, debug);
231 229
232 kfree(debug->owners); 230 kfree(debug->owners);
233} 231}
diff --git a/drivers/gpu/drm/i915/intel_wakeref.h b/drivers/gpu/drm/i915/intel_wakeref.h
index 9cbb2ebf575b..38275310b196 100644
--- a/drivers/gpu/drm/i915/intel_wakeref.h
+++ b/drivers/gpu/drm/i915/intel_wakeref.h
@@ -66,6 +66,21 @@ intel_wakeref_get(struct intel_runtime_pm *rpm,
66} 66}
67 67
68/** 68/**
69 * intel_wakeref_get_if_in_use: Acquire the wakeref
70 * @wf: the wakeref
71 *
72 * Acquire a hold on the wakeref, but only if the wakeref is already
73 * active.
74 *
75 * Returns: true if the wakeref was acquired, false otherwise.
76 */
77static inline bool
78intel_wakeref_get_if_active(struct intel_wakeref *wf)
79{
80 return atomic_inc_not_zero(&wf->count);
81}
82
83/**
69 * intel_wakeref_put: Release the wakeref 84 * intel_wakeref_put: Release the wakeref
70 * @i915: the drm_i915_private device 85 * @i915: the drm_i915_private device
71 * @wf: the wakeref 86 * @wf: the wakeref
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_drv.c b/drivers/gpu/drm/mediatek/mtk_drm_drv.c
index 95fdbd0fbcac..945bc20f1d33 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_drv.c
+++ b/drivers/gpu/drm/mediatek/mtk_drm_drv.c
@@ -17,6 +17,7 @@
17#include <linux/of_address.h> 17#include <linux/of_address.h>
18#include <linux/of_platform.h> 18#include <linux/of_platform.h>
19#include <linux/pm_runtime.h> 19#include <linux/pm_runtime.h>
20#include <linux/dma-mapping.h>
20 21
21#include "mtk_drm_crtc.h" 22#include "mtk_drm_crtc.h"
22#include "mtk_drm_ddp.h" 23#include "mtk_drm_ddp.h"
@@ -213,6 +214,7 @@ static int mtk_drm_kms_init(struct drm_device *drm)
213 struct mtk_drm_private *private = drm->dev_private; 214 struct mtk_drm_private *private = drm->dev_private;
214 struct platform_device *pdev; 215 struct platform_device *pdev;
215 struct device_node *np; 216 struct device_node *np;
217 struct device *dma_dev;
216 int ret; 218 int ret;
217 219
218 if (!iommu_present(&platform_bus_type)) 220 if (!iommu_present(&platform_bus_type))
@@ -275,7 +277,29 @@ static int mtk_drm_kms_init(struct drm_device *drm)
275 goto err_component_unbind; 277 goto err_component_unbind;
276 } 278 }
277 279
278 private->dma_dev = &pdev->dev; 280 dma_dev = &pdev->dev;
281 private->dma_dev = dma_dev;
282
283 /*
284 * Configure the DMA segment size to make sure we get contiguous IOVA
285 * when importing PRIME buffers.
286 */
287 if (!dma_dev->dma_parms) {
288 private->dma_parms_allocated = true;
289 dma_dev->dma_parms =
290 devm_kzalloc(drm->dev, sizeof(*dma_dev->dma_parms),
291 GFP_KERNEL);
292 }
293 if (!dma_dev->dma_parms) {
294 ret = -ENOMEM;
295 goto err_component_unbind;
296 }
297
298 ret = dma_set_max_seg_size(dma_dev, (unsigned int)DMA_BIT_MASK(32));
299 if (ret) {
300 dev_err(dma_dev, "Failed to set DMA segment size\n");
301 goto err_unset_dma_parms;
302 }
279 303
280 /* 304 /*
281 * We don't use the drm_irq_install() helpers provided by the DRM 305 * We don't use the drm_irq_install() helpers provided by the DRM
@@ -285,13 +309,16 @@ static int mtk_drm_kms_init(struct drm_device *drm)
285 drm->irq_enabled = true; 309 drm->irq_enabled = true;
286 ret = drm_vblank_init(drm, MAX_CRTC); 310 ret = drm_vblank_init(drm, MAX_CRTC);
287 if (ret < 0) 311 if (ret < 0)
288 goto err_component_unbind; 312 goto err_unset_dma_parms;
289 313
290 drm_kms_helper_poll_init(drm); 314 drm_kms_helper_poll_init(drm);
291 drm_mode_config_reset(drm); 315 drm_mode_config_reset(drm);
292 316
293 return 0; 317 return 0;
294 318
319err_unset_dma_parms:
320 if (private->dma_parms_allocated)
321 dma_dev->dma_parms = NULL;
295err_component_unbind: 322err_component_unbind:
296 component_unbind_all(drm->dev, drm); 323 component_unbind_all(drm->dev, drm);
297err_config_cleanup: 324err_config_cleanup:
@@ -302,9 +329,14 @@ err_config_cleanup:
302 329
303static void mtk_drm_kms_deinit(struct drm_device *drm) 330static void mtk_drm_kms_deinit(struct drm_device *drm)
304{ 331{
332 struct mtk_drm_private *private = drm->dev_private;
333
305 drm_kms_helper_poll_fini(drm); 334 drm_kms_helper_poll_fini(drm);
306 drm_atomic_helper_shutdown(drm); 335 drm_atomic_helper_shutdown(drm);
307 336
337 if (private->dma_parms_allocated)
338 private->dma_dev->dma_parms = NULL;
339
308 component_unbind_all(drm->dev, drm); 340 component_unbind_all(drm->dev, drm);
309 drm_mode_config_cleanup(drm); 341 drm_mode_config_cleanup(drm);
310} 342}
@@ -320,6 +352,18 @@ static const struct file_operations mtk_drm_fops = {
320 .compat_ioctl = drm_compat_ioctl, 352 .compat_ioctl = drm_compat_ioctl,
321}; 353};
322 354
355/*
356 * We need to override this because the device used to import the memory is
357 * not dev->dev, as drm_gem_prime_import() expects.
358 */
359struct drm_gem_object *mtk_drm_gem_prime_import(struct drm_device *dev,
360 struct dma_buf *dma_buf)
361{
362 struct mtk_drm_private *private = dev->dev_private;
363
364 return drm_gem_prime_import_dev(dev, dma_buf, private->dma_dev);
365}
366
323static struct drm_driver mtk_drm_driver = { 367static struct drm_driver mtk_drm_driver = {
324 .driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_PRIME | 368 .driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_PRIME |
325 DRIVER_ATOMIC, 369 DRIVER_ATOMIC,
@@ -331,7 +375,7 @@ static struct drm_driver mtk_drm_driver = {
331 .prime_handle_to_fd = drm_gem_prime_handle_to_fd, 375 .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
332 .prime_fd_to_handle = drm_gem_prime_fd_to_handle, 376 .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
333 .gem_prime_export = drm_gem_prime_export, 377 .gem_prime_export = drm_gem_prime_export,
334 .gem_prime_import = drm_gem_prime_import, 378 .gem_prime_import = mtk_drm_gem_prime_import,
335 .gem_prime_get_sg_table = mtk_gem_prime_get_sg_table, 379 .gem_prime_get_sg_table = mtk_gem_prime_get_sg_table,
336 .gem_prime_import_sg_table = mtk_gem_prime_import_sg_table, 380 .gem_prime_import_sg_table = mtk_gem_prime_import_sg_table,
337 .gem_prime_mmap = mtk_drm_gem_mmap_buf, 381 .gem_prime_mmap = mtk_drm_gem_mmap_buf,
@@ -524,12 +568,15 @@ static int mtk_drm_probe(struct platform_device *pdev)
524 comp = devm_kzalloc(dev, sizeof(*comp), GFP_KERNEL); 568 comp = devm_kzalloc(dev, sizeof(*comp), GFP_KERNEL);
525 if (!comp) { 569 if (!comp) {
526 ret = -ENOMEM; 570 ret = -ENOMEM;
571 of_node_put(node);
527 goto err_node; 572 goto err_node;
528 } 573 }
529 574
530 ret = mtk_ddp_comp_init(dev, node, comp, comp_id, NULL); 575 ret = mtk_ddp_comp_init(dev, node, comp, comp_id, NULL);
531 if (ret) 576 if (ret) {
577 of_node_put(node);
532 goto err_node; 578 goto err_node;
579 }
533 580
534 private->ddp_comp[comp_id] = comp; 581 private->ddp_comp[comp_id] = comp;
535 } 582 }
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_drv.h b/drivers/gpu/drm/mediatek/mtk_drm_drv.h
index 598ff3e70446..e03fea12ff59 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_drv.h
+++ b/drivers/gpu/drm/mediatek/mtk_drm_drv.h
@@ -51,6 +51,8 @@ struct mtk_drm_private {
51 } commit; 51 } commit;
52 52
53 struct drm_atomic_state *suspend_state; 53 struct drm_atomic_state *suspend_state;
54
55 bool dma_parms_allocated;
54}; 56};
55 57
56extern struct platform_driver mtk_ddp_driver; 58extern struct platform_driver mtk_ddp_driver;
diff --git a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
index 1671db47aa57..e9c55d1d6c04 100644
--- a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
@@ -59,6 +59,7 @@ static void a5xx_submit_in_rb(struct msm_gpu *gpu, struct msm_gem_submit *submit
59 case MSM_SUBMIT_CMD_CTX_RESTORE_BUF: 59 case MSM_SUBMIT_CMD_CTX_RESTORE_BUF:
60 if (priv->lastctx == ctx) 60 if (priv->lastctx == ctx)
61 break; 61 break;
62 /* fall-thru */
62 case MSM_SUBMIT_CMD_BUF: 63 case MSM_SUBMIT_CMD_BUF:
63 /* copy commands into RB: */ 64 /* copy commands into RB: */
64 obj = submit->bos[submit->cmd[i].idx].obj; 65 obj = submit->bos[submit->cmd[i].idx].obj;
@@ -149,6 +150,7 @@ static void a5xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
149 case MSM_SUBMIT_CMD_CTX_RESTORE_BUF: 150 case MSM_SUBMIT_CMD_CTX_RESTORE_BUF:
150 if (priv->lastctx == ctx) 151 if (priv->lastctx == ctx)
151 break; 152 break;
153 /* fall-thru */
152 case MSM_SUBMIT_CMD_BUF: 154 case MSM_SUBMIT_CMD_BUF:
153 OUT_PKT7(ring, CP_INDIRECT_BUFFER_PFE, 3); 155 OUT_PKT7(ring, CP_INDIRECT_BUFFER_PFE, 3);
154 OUT_RING(ring, lower_32_bits(submit->cmd[i].iova)); 156 OUT_RING(ring, lower_32_bits(submit->cmd[i].iova));
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
index be39cf01e51e..dc8ec2c94301 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
@@ -115,6 +115,7 @@ static void a6xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
115 case MSM_SUBMIT_CMD_CTX_RESTORE_BUF: 115 case MSM_SUBMIT_CMD_CTX_RESTORE_BUF:
116 if (priv->lastctx == ctx) 116 if (priv->lastctx == ctx)
117 break; 117 break;
118 /* fall-thru */
118 case MSM_SUBMIT_CMD_BUF: 119 case MSM_SUBMIT_CMD_BUF:
119 OUT_PKT7(ring, CP_INDIRECT_BUFFER_PFE, 3); 120 OUT_PKT7(ring, CP_INDIRECT_BUFFER_PFE, 3);
120 OUT_RING(ring, lower_32_bits(submit->cmd[i].iova)); 121 OUT_RING(ring, lower_32_bits(submit->cmd[i].iova));
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.c b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
index 9acbbc0f3232..048c8be426f3 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
@@ -428,6 +428,7 @@ void adreno_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
428 /* ignore if there has not been a ctx switch: */ 428 /* ignore if there has not been a ctx switch: */
429 if (priv->lastctx == ctx) 429 if (priv->lastctx == ctx)
430 break; 430 break;
431 /* fall-thru */
431 case MSM_SUBMIT_CMD_BUF: 432 case MSM_SUBMIT_CMD_BUF:
432 OUT_PKT3(ring, adreno_is_a430(adreno_gpu) ? 433 OUT_PKT3(ring, adreno_is_a430(adreno_gpu) ?
433 CP_INDIRECT_BUFFER_PFE : CP_INDIRECT_BUFFER_PFD, 2); 434 CP_INDIRECT_BUFFER_PFE : CP_INDIRECT_BUFFER_PFD, 2);
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
index 0e2f74163a16..0aa8a12c9952 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
@@ -2221,8 +2221,6 @@ int dpu_encoder_setup(struct drm_device *dev, struct drm_encoder *enc,
2221 if (ret) 2221 if (ret)
2222 goto fail; 2222 goto fail;
2223 2223
2224 spin_lock_init(&dpu_enc->enc_spinlock);
2225
2226 atomic_set(&dpu_enc->frame_done_timeout_ms, 0); 2224 atomic_set(&dpu_enc->frame_done_timeout_ms, 0);
2227 timer_setup(&dpu_enc->frame_done_timer, 2225 timer_setup(&dpu_enc->frame_done_timer,
2228 dpu_encoder_frame_done_timeout, 0); 2226 dpu_encoder_frame_done_timeout, 0);
@@ -2276,6 +2274,7 @@ struct drm_encoder *dpu_encoder_init(struct drm_device *dev,
2276 2274
2277 drm_encoder_helper_add(&dpu_enc->base, &dpu_encoder_helper_funcs); 2275 drm_encoder_helper_add(&dpu_enc->base, &dpu_encoder_helper_funcs);
2278 2276
2277 spin_lock_init(&dpu_enc->enc_spinlock);
2279 dpu_enc->enabled = false; 2278 dpu_enc->enabled = false;
2280 2279
2281 return &dpu_enc->base; 2280 return &dpu_enc->base;
diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c
index ff14555372d0..78d5fa230c16 100644
--- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c
@@ -439,6 +439,18 @@ static void mdp5_crtc_atomic_disable(struct drm_crtc *crtc,
439 mdp5_crtc->enabled = false; 439 mdp5_crtc->enabled = false;
440} 440}
441 441
442static void mdp5_crtc_vblank_on(struct drm_crtc *crtc)
443{
444 struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
445 struct mdp5_interface *intf = mdp5_cstate->pipeline.intf;
446 u32 count;
447
448 count = intf->mode == MDP5_INTF_DSI_MODE_COMMAND ? 0 : 0xffffffff;
449 drm_crtc_set_max_vblank_count(crtc, count);
450
451 drm_crtc_vblank_on(crtc);
452}
453
442static void mdp5_crtc_atomic_enable(struct drm_crtc *crtc, 454static void mdp5_crtc_atomic_enable(struct drm_crtc *crtc,
443 struct drm_crtc_state *old_state) 455 struct drm_crtc_state *old_state)
444{ 456{
@@ -475,7 +487,7 @@ static void mdp5_crtc_atomic_enable(struct drm_crtc *crtc,
475 } 487 }
476 488
477 /* Restore vblank irq handling after power is enabled */ 489 /* Restore vblank irq handling after power is enabled */
478 drm_crtc_vblank_on(crtc); 490 mdp5_crtc_vblank_on(crtc);
479 491
480 mdp5_crtc_mode_set_nofb(crtc); 492 mdp5_crtc_mode_set_nofb(crtc);
481 493
@@ -1028,6 +1040,8 @@ static void mdp5_crtc_reset(struct drm_crtc *crtc)
1028 mdp5_crtc_destroy_state(crtc, crtc->state); 1040 mdp5_crtc_destroy_state(crtc, crtc->state);
1029 1041
1030 __drm_atomic_helper_crtc_reset(crtc, &mdp5_cstate->base); 1042 __drm_atomic_helper_crtc_reset(crtc, &mdp5_cstate->base);
1043
1044 drm_crtc_vblank_reset(crtc);
1031} 1045}
1032 1046
1033static const struct drm_crtc_funcs mdp5_crtc_funcs = { 1047static const struct drm_crtc_funcs mdp5_crtc_funcs = {
diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c
index 4a60f5fca6b0..fec6ef1ae3b9 100644
--- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c
@@ -740,7 +740,7 @@ struct msm_kms *mdp5_kms_init(struct drm_device *dev)
740 dev->driver->get_vblank_timestamp = drm_calc_vbltimestamp_from_scanoutpos; 740 dev->driver->get_vblank_timestamp = drm_calc_vbltimestamp_from_scanoutpos;
741 dev->driver->get_scanout_position = mdp5_get_scanoutpos; 741 dev->driver->get_scanout_position = mdp5_get_scanoutpos;
742 dev->driver->get_vblank_counter = mdp5_get_vblank_counter; 742 dev->driver->get_vblank_counter = mdp5_get_vblank_counter;
743 dev->max_vblank_count = 0xffffffff; 743 dev->max_vblank_count = 0; /* max_vblank_count is set on each CRTC */
744 dev->vblank_disable_immediate = true; 744 dev->vblank_disable_immediate = true;
745 745
746 return kms; 746 return kms;
diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
index ab64ab470de7..c356f5ccf253 100644
--- a/drivers/gpu/drm/msm/msm_drv.c
+++ b/drivers/gpu/drm/msm/msm_drv.c
@@ -619,7 +619,7 @@ static int context_init(struct drm_device *dev, struct drm_file *file)
619 619
620 msm_submitqueue_init(dev, ctx); 620 msm_submitqueue_init(dev, ctx);
621 621
622 ctx->aspace = priv->gpu->aspace; 622 ctx->aspace = priv->gpu ? priv->gpu->aspace : NULL;
623 file->driver_priv = ctx; 623 file->driver_priv = ctx;
624 624
625 return 0; 625 return 0;
@@ -1279,7 +1279,8 @@ static int add_gpu_components(struct device *dev,
1279 if (!np) 1279 if (!np)
1280 return 0; 1280 return 0;
1281 1281
1282 drm_of_component_match_add(dev, matchptr, compare_of, np); 1282 if (of_device_is_available(np))
1283 drm_of_component_match_add(dev, matchptr, compare_of, np);
1283 1284
1284 of_node_put(np); 1285 of_node_put(np);
1285 1286
diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
index 8b78554cfde3..8cf6362e64bf 100644
--- a/drivers/gpu/drm/msm/msm_gem.c
+++ b/drivers/gpu/drm/msm/msm_gem.c
@@ -32,6 +32,46 @@ static bool use_pages(struct drm_gem_object *obj)
32 return !msm_obj->vram_node; 32 return !msm_obj->vram_node;
33} 33}
34 34
35/*
36 * Cache sync.. this is a bit over-complicated, to fit dma-mapping
37 * API. Really GPU cache is out of scope here (handled on cmdstream)
38 * and all we need to do is invalidate newly allocated pages before
39 * mapping to CPU as uncached/writecombine.
40 *
41 * On top of this, we have the added headache, that depending on
42 * display generation, the display's iommu may be wired up to either
43 * the toplevel drm device (mdss), or to the mdp sub-node, meaning
44 * that here we either have dma-direct or iommu ops.
45 *
46 * Let this be a cautionary tail of abstraction gone wrong.
47 */
48
49static void sync_for_device(struct msm_gem_object *msm_obj)
50{
51 struct device *dev = msm_obj->base.dev->dev;
52
53 if (get_dma_ops(dev)) {
54 dma_sync_sg_for_device(dev, msm_obj->sgt->sgl,
55 msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
56 } else {
57 dma_map_sg(dev, msm_obj->sgt->sgl,
58 msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
59 }
60}
61
62static void sync_for_cpu(struct msm_gem_object *msm_obj)
63{
64 struct device *dev = msm_obj->base.dev->dev;
65
66 if (get_dma_ops(dev)) {
67 dma_sync_sg_for_cpu(dev, msm_obj->sgt->sgl,
68 msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
69 } else {
70 dma_unmap_sg(dev, msm_obj->sgt->sgl,
71 msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
72 }
73}
74
35/* allocate pages from VRAM carveout, used when no IOMMU: */ 75/* allocate pages from VRAM carveout, used when no IOMMU: */
36static struct page **get_pages_vram(struct drm_gem_object *obj, int npages) 76static struct page **get_pages_vram(struct drm_gem_object *obj, int npages)
37{ 77{
@@ -97,8 +137,7 @@ static struct page **get_pages(struct drm_gem_object *obj)
97 * because display controller, GPU, etc. are not coherent: 137 * because display controller, GPU, etc. are not coherent:
98 */ 138 */
99 if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED)) 139 if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED))
100 dma_map_sg(dev->dev, msm_obj->sgt->sgl, 140 sync_for_device(msm_obj);
101 msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
102 } 141 }
103 142
104 return msm_obj->pages; 143 return msm_obj->pages;
@@ -127,9 +166,7 @@ static void put_pages(struct drm_gem_object *obj)
127 * GPU, etc. are not coherent: 166 * GPU, etc. are not coherent:
128 */ 167 */
129 if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED)) 168 if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED))
130 dma_unmap_sg(obj->dev->dev, msm_obj->sgt->sgl, 169 sync_for_cpu(msm_obj);
131 msm_obj->sgt->nents,
132 DMA_BIDIRECTIONAL);
133 170
134 sg_free_table(msm_obj->sgt); 171 sg_free_table(msm_obj->sgt);
135 kfree(msm_obj->sgt); 172 kfree(msm_obj->sgt);
diff --git a/drivers/gpu/drm/nouveau/dispnv50/disp.c b/drivers/gpu/drm/nouveau/dispnv50/disp.c
index 8497768f1b41..5c36c75232e6 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/disp.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/disp.c
@@ -771,16 +771,20 @@ nv50_msto_atomic_check(struct drm_encoder *encoder,
771 struct nv50_head_atom *asyh = nv50_head_atom(crtc_state); 771 struct nv50_head_atom *asyh = nv50_head_atom(crtc_state);
772 int slots; 772 int slots;
773 773
774 /* When restoring duplicated states, we need to make sure that the 774 if (crtc_state->mode_changed || crtc_state->connectors_changed) {
775 * bw remains the same and avoid recalculating it, as the connector's 775 /*
776 * bpc may have changed after the state was duplicated 776 * When restoring duplicated states, we need to make sure that
777 */ 777 * the bw remains the same and avoid recalculating it, as the
778 if (!state->duplicated) 778 * connector's bpc may have changed after the state was
779 asyh->dp.pbn = 779 * duplicated
780 drm_dp_calc_pbn_mode(crtc_state->adjusted_mode.clock, 780 */
781 connector->display_info.bpc * 3); 781 if (!state->duplicated) {
782 const int bpp = connector->display_info.bpc * 3;
783 const int clock = crtc_state->adjusted_mode.clock;
784
785 asyh->dp.pbn = drm_dp_calc_pbn_mode(clock, bpp);
786 }
782 787
783 if (drm_atomic_crtc_needs_modeset(crtc_state)) {
784 slots = drm_dp_atomic_find_vcpi_slots(state, &mstm->mgr, 788 slots = drm_dp_atomic_find_vcpi_slots(state, &mstm->mgr,
785 mstc->port, 789 mstc->port,
786 asyh->dp.pbn); 790 asyh->dp.pbn);
diff --git a/drivers/gpu/drm/nouveau/nouveau_svm.c b/drivers/gpu/drm/nouveau/nouveau_svm.c
index 8c92374afcf2..a835cebb6d90 100644
--- a/drivers/gpu/drm/nouveau/nouveau_svm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_svm.c
@@ -475,6 +475,47 @@ nouveau_svm_fault_cache(struct nouveau_svm *svm,
475 fault->inst, fault->addr, fault->access); 475 fault->inst, fault->addr, fault->access);
476} 476}
477 477
478static inline bool
479nouveau_range_done(struct hmm_range *range)
480{
481 bool ret = hmm_range_valid(range);
482
483 hmm_range_unregister(range);
484 return ret;
485}
486
487static int
488nouveau_range_fault(struct hmm_mirror *mirror, struct hmm_range *range)
489{
490 long ret;
491
492 range->default_flags = 0;
493 range->pfn_flags_mask = -1UL;
494
495 ret = hmm_range_register(range, mirror,
496 range->start, range->end,
497 PAGE_SHIFT);
498 if (ret) {
499 up_read(&range->vma->vm_mm->mmap_sem);
500 return (int)ret;
501 }
502
503 if (!hmm_range_wait_until_valid(range, HMM_RANGE_DEFAULT_TIMEOUT)) {
504 up_read(&range->vma->vm_mm->mmap_sem);
505 return -EAGAIN;
506 }
507
508 ret = hmm_range_fault(range, true);
509 if (ret <= 0) {
510 if (ret == 0)
511 ret = -EBUSY;
512 up_read(&range->vma->vm_mm->mmap_sem);
513 hmm_range_unregister(range);
514 return ret;
515 }
516 return 0;
517}
518
478static int 519static int
479nouveau_svm_fault(struct nvif_notify *notify) 520nouveau_svm_fault(struct nvif_notify *notify)
480{ 521{
@@ -649,10 +690,10 @@ nouveau_svm_fault(struct nvif_notify *notify)
649 range.values = nouveau_svm_pfn_values; 690 range.values = nouveau_svm_pfn_values;
650 range.pfn_shift = NVIF_VMM_PFNMAP_V0_ADDR_SHIFT; 691 range.pfn_shift = NVIF_VMM_PFNMAP_V0_ADDR_SHIFT;
651again: 692again:
652 ret = hmm_vma_fault(&svmm->mirror, &range, true); 693 ret = nouveau_range_fault(&svmm->mirror, &range);
653 if (ret == 0) { 694 if (ret == 0) {
654 mutex_lock(&svmm->mutex); 695 mutex_lock(&svmm->mutex);
655 if (!hmm_vma_range_done(&range)) { 696 if (!nouveau_range_done(&range)) {
656 mutex_unlock(&svmm->mutex); 697 mutex_unlock(&svmm->mutex);
657 goto again; 698 goto again;
658 } 699 }
@@ -666,8 +707,8 @@ again:
666 NULL); 707 NULL);
667 svmm->vmm->vmm.object.client->super = false; 708 svmm->vmm->vmm.object.client->super = false;
668 mutex_unlock(&svmm->mutex); 709 mutex_unlock(&svmm->mutex);
710 up_read(&svmm->mm->mmap_sem);
669 } 711 }
670 up_read(&svmm->mm->mmap_sem);
671 712
672 /* Cancel any faults in the window whose pages didn't manage 713 /* Cancel any faults in the window whose pages didn't manage
673 * to keep their valid bit, or stay writeable when required. 714 * to keep their valid bit, or stay writeable when required.
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/aux.c b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/aux.c
index b4e7404fe660..a11637b0f6cc 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/aux.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/aux.c
@@ -40,8 +40,7 @@ nvkm_i2c_aux_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
40 u8 *ptr = msg->buf; 40 u8 *ptr = msg->buf;
41 41
42 while (remaining) { 42 while (remaining) {
43 u8 cnt = (remaining > 16) ? 16 : remaining; 43 u8 cnt, retries, cmd;
44 u8 cmd;
45 44
46 if (msg->flags & I2C_M_RD) 45 if (msg->flags & I2C_M_RD)
47 cmd = 1; 46 cmd = 1;
@@ -51,10 +50,19 @@ nvkm_i2c_aux_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
51 if (mcnt || remaining > 16) 50 if (mcnt || remaining > 16)
52 cmd |= 4; /* MOT */ 51 cmd |= 4; /* MOT */
53 52
54 ret = aux->func->xfer(aux, true, cmd, msg->addr, ptr, &cnt); 53 for (retries = 0, cnt = 0;
55 if (ret < 0) { 54 retries < 32 && !cnt;
56 nvkm_i2c_aux_release(aux); 55 retries++) {
57 return ret; 56 cnt = min_t(u8, remaining, 16);
57 ret = aux->func->xfer(aux, true, cmd,
58 msg->addr, ptr, &cnt);
59 if (ret < 0)
60 goto out;
61 }
62 if (!cnt) {
63 AUX_TRACE(aux, "no data after 32 retries");
64 ret = -EIO;
65 goto out;
58 } 66 }
59 67
60 ptr += cnt; 68 ptr += cnt;
@@ -64,8 +72,10 @@ nvkm_i2c_aux_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
64 msg++; 72 msg++;
65 } 73 }
66 74
75 ret = num;
76out:
67 nvkm_i2c_aux_release(aux); 77 nvkm_i2c_aux_release(aux);
68 return num; 78 return ret;
69} 79}
70 80
71static u32 81static u32
diff --git a/drivers/gpu/drm/omapdrm/dss/output.c b/drivers/gpu/drm/omapdrm/dss/output.c
index de0f882f0f7b..14b41de44ebc 100644
--- a/drivers/gpu/drm/omapdrm/dss/output.c
+++ b/drivers/gpu/drm/omapdrm/dss/output.c
@@ -4,6 +4,7 @@
4 * Author: Archit Taneja <archit@ti.com> 4 * Author: Archit Taneja <archit@ti.com>
5 */ 5 */
6 6
7#include <linux/bitops.h>
7#include <linux/kernel.h> 8#include <linux/kernel.h>
8#include <linux/module.h> 9#include <linux/module.h>
9#include <linux/platform_device.h> 10#include <linux/platform_device.h>
@@ -20,7 +21,8 @@ int omapdss_device_init_output(struct omap_dss_device *out)
20{ 21{
21 struct device_node *remote_node; 22 struct device_node *remote_node;
22 23
23 remote_node = of_graph_get_remote_node(out->dev->of_node, 0, 0); 24 remote_node = of_graph_get_remote_node(out->dev->of_node,
25 ffs(out->of_ports) - 1, 0);
24 if (!remote_node) { 26 if (!remote_node) {
25 dev_dbg(out->dev, "failed to find video sink\n"); 27 dev_dbg(out->dev, "failed to find video sink\n");
26 return 0; 28 return 0;
diff --git a/drivers/gpu/drm/omapdrm/omap_drv.c b/drivers/gpu/drm/omapdrm/omap_drv.c
index 288c59dae56a..1bad0a2cc5c6 100644
--- a/drivers/gpu/drm/omapdrm/omap_drv.c
+++ b/drivers/gpu/drm/omapdrm/omap_drv.c
@@ -669,7 +669,7 @@ static int pdev_probe(struct platform_device *pdev)
669 if (omapdss_is_initialized() == false) 669 if (omapdss_is_initialized() == false)
670 return -EPROBE_DEFER; 670 return -EPROBE_DEFER;
671 671
672 ret = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32)); 672 ret = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
673 if (ret) { 673 if (ret) {
674 dev_err(&pdev->dev, "Failed to set the DMA mask\n"); 674 dev_err(&pdev->dev, "Failed to set the DMA mask\n");
675 return ret; 675 return ret;
diff --git a/drivers/gpu/drm/qxl/qxl_drv.c b/drivers/gpu/drm/qxl/qxl_drv.c
index f33e349c4ec5..952201c6d821 100644
--- a/drivers/gpu/drm/qxl/qxl_drv.c
+++ b/drivers/gpu/drm/qxl/qxl_drv.c
@@ -59,6 +59,11 @@ module_param_named(num_heads, qxl_num_crtc, int, 0400);
59static struct drm_driver qxl_driver; 59static struct drm_driver qxl_driver;
60static struct pci_driver qxl_pci_driver; 60static struct pci_driver qxl_pci_driver;
61 61
62static bool is_vga(struct pci_dev *pdev)
63{
64 return pdev->class == PCI_CLASS_DISPLAY_VGA << 8;
65}
66
62static int 67static int
63qxl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) 68qxl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
64{ 69{
@@ -83,9 +88,17 @@ qxl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
83 if (ret) 88 if (ret)
84 goto disable_pci; 89 goto disable_pci;
85 90
91 if (is_vga(pdev)) {
92 ret = vga_get_interruptible(pdev, VGA_RSRC_LEGACY_IO);
93 if (ret) {
94 DRM_ERROR("can't get legacy vga ioports\n");
95 goto disable_pci;
96 }
97 }
98
86 ret = qxl_device_init(qdev, &qxl_driver, pdev); 99 ret = qxl_device_init(qdev, &qxl_driver, pdev);
87 if (ret) 100 if (ret)
88 goto disable_pci; 101 goto put_vga;
89 102
90 ret = qxl_modeset_init(qdev); 103 ret = qxl_modeset_init(qdev);
91 if (ret) 104 if (ret)
@@ -105,6 +118,9 @@ modeset_cleanup:
105 qxl_modeset_fini(qdev); 118 qxl_modeset_fini(qdev);
106unload: 119unload:
107 qxl_device_fini(qdev); 120 qxl_device_fini(qdev);
121put_vga:
122 if (is_vga(pdev))
123 vga_put(pdev, VGA_RSRC_LEGACY_IO);
108disable_pci: 124disable_pci:
109 pci_disable_device(pdev); 125 pci_disable_device(pdev);
110free_dev: 126free_dev:
@@ -122,6 +138,8 @@ qxl_pci_remove(struct pci_dev *pdev)
122 138
123 qxl_modeset_fini(qdev); 139 qxl_modeset_fini(qdev);
124 qxl_device_fini(qdev); 140 qxl_device_fini(qdev);
141 if (is_vga(pdev))
142 vga_put(pdev, VGA_RSRC_LEGACY_IO);
125 143
126 dev->dev_private = NULL; 144 dev->dev_private = NULL;
127 kfree(qdev); 145 kfree(qdev);
diff --git a/drivers/gpu/drm/rcar-du/rcar_lvds.c b/drivers/gpu/drm/rcar-du/rcar_lvds.c
index 1c62578590f4..082d02c84024 100644
--- a/drivers/gpu/drm/rcar-du/rcar_lvds.c
+++ b/drivers/gpu/drm/rcar-du/rcar_lvds.c
@@ -673,10 +673,8 @@ static int rcar_lvds_parse_dt_companion(struct rcar_lvds *lvds)
673 673
674 /* Locate the companion LVDS encoder for dual-link operation, if any. */ 674 /* Locate the companion LVDS encoder for dual-link operation, if any. */
675 companion = of_parse_phandle(dev->of_node, "renesas,companion", 0); 675 companion = of_parse_phandle(dev->of_node, "renesas,companion", 0);
676 if (!companion) { 676 if (!companion)
677 dev_err(dev, "Companion LVDS encoder not found\n"); 677 return 0;
678 return -ENXIO;
679 }
680 678
681 /* 679 /*
682 * Sanity check: the companion encoder must have the same compatible 680 * Sanity check: the companion encoder must have the same compatible
diff --git a/drivers/gpu/drm/rockchip/analogix_dp-rockchip.c b/drivers/gpu/drm/rockchip/analogix_dp-rockchip.c
index 95e5c517a15f..9aae3d8e99ef 100644
--- a/drivers/gpu/drm/rockchip/analogix_dp-rockchip.c
+++ b/drivers/gpu/drm/rockchip/analogix_dp-rockchip.c
@@ -432,7 +432,7 @@ static int rockchip_dp_resume(struct device *dev)
432 432
433static const struct dev_pm_ops rockchip_dp_pm_ops = { 433static const struct dev_pm_ops rockchip_dp_pm_ops = {
434#ifdef CONFIG_PM_SLEEP 434#ifdef CONFIG_PM_SLEEP
435 .suspend = rockchip_dp_suspend, 435 .suspend_late = rockchip_dp_suspend,
436 .resume_early = rockchip_dp_resume, 436 .resume_early = rockchip_dp_resume,
437#endif 437#endif
438}; 438};
diff --git a/drivers/gpu/drm/scheduler/sched_entity.c b/drivers/gpu/drm/scheduler/sched_entity.c
index 35ddbec1375a..671c90f34ede 100644
--- a/drivers/gpu/drm/scheduler/sched_entity.c
+++ b/drivers/gpu/drm/scheduler/sched_entity.c
@@ -95,7 +95,7 @@ static bool drm_sched_entity_is_idle(struct drm_sched_entity *entity)
95 rmb(); /* for list_empty to work without lock */ 95 rmb(); /* for list_empty to work without lock */
96 96
97 if (list_empty(&entity->list) || 97 if (list_empty(&entity->list) ||
98 spsc_queue_peek(&entity->job_queue) == NULL) 98 spsc_queue_count(&entity->job_queue) == 0)
99 return true; 99 return true;
100 100
101 return false; 101 return false;
@@ -281,7 +281,7 @@ void drm_sched_entity_fini(struct drm_sched_entity *entity)
281 /* Consumption of existing IBs wasn't completed. Forcefully 281 /* Consumption of existing IBs wasn't completed. Forcefully
282 * remove them here. 282 * remove them here.
283 */ 283 */
284 if (spsc_queue_peek(&entity->job_queue)) { 284 if (spsc_queue_count(&entity->job_queue)) {
285 if (sched) { 285 if (sched) {
286 /* Park the kernel for a moment to make sure it isn't processing 286 /* Park the kernel for a moment to make sure it isn't processing
287 * our enity. 287 * our enity.
diff --git a/drivers/gpu/drm/sun4i/sun4i_tcon.c b/drivers/gpu/drm/sun4i/sun4i_tcon.c
index 64c43ee6bd92..df0cc8f46d7b 100644
--- a/drivers/gpu/drm/sun4i/sun4i_tcon.c
+++ b/drivers/gpu/drm/sun4i/sun4i_tcon.c
@@ -314,6 +314,7 @@ static void sun4i_tcon0_mode_set_dithering(struct sun4i_tcon *tcon,
314 /* R and B components are only 5 bits deep */ 314 /* R and B components are only 5 bits deep */
315 val |= SUN4I_TCON0_FRM_CTL_MODE_R; 315 val |= SUN4I_TCON0_FRM_CTL_MODE_R;
316 val |= SUN4I_TCON0_FRM_CTL_MODE_B; 316 val |= SUN4I_TCON0_FRM_CTL_MODE_B;
317 /* Fall through */
317 case MEDIA_BUS_FMT_RGB666_1X18: 318 case MEDIA_BUS_FMT_RGB666_1X18:
318 case MEDIA_BUS_FMT_RGB666_1X7X3_SPWG: 319 case MEDIA_BUS_FMT_RGB666_1X7X3_SPWG:
319 /* Fall through: enable dithering */ 320 /* Fall through: enable dithering */
diff --git a/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c b/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c
index a1fc8b520985..b889ad3e86e1 100644
--- a/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c
+++ b/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c
@@ -993,6 +993,7 @@ static ssize_t sun6i_dsi_transfer(struct mipi_dsi_host *host,
993 ret = sun6i_dsi_dcs_read(dsi, msg); 993 ret = sun6i_dsi_dcs_read(dsi, msg);
994 break; 994 break;
995 } 995 }
996 /* Else, fall through */
996 997
997 default: 998 default:
998 ret = -EINVAL; 999 ret = -EINVAL;
diff --git a/drivers/gpu/drm/tegra/output.c b/drivers/gpu/drm/tegra/output.c
index 274cb955e2e1..bdcaa4c7168c 100644
--- a/drivers/gpu/drm/tegra/output.c
+++ b/drivers/gpu/drm/tegra/output.c
@@ -126,8 +126,12 @@ int tegra_output_probe(struct tegra_output *output)
126 "nvidia,hpd-gpio", 0, 126 "nvidia,hpd-gpio", 0,
127 GPIOD_IN, 127 GPIOD_IN,
128 "HDMI hotplug detect"); 128 "HDMI hotplug detect");
129 if (IS_ERR(output->hpd_gpio)) 129 if (IS_ERR(output->hpd_gpio)) {
130 return PTR_ERR(output->hpd_gpio); 130 if (PTR_ERR(output->hpd_gpio) != -ENOENT)
131 return PTR_ERR(output->hpd_gpio);
132
133 output->hpd_gpio = NULL;
134 }
131 135
132 if (output->hpd_gpio) { 136 if (output->hpd_gpio) {
133 err = gpiod_to_irq(output->hpd_gpio); 137 err = gpiod_to_irq(output->hpd_gpio);
diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c b/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
index d594f7520b7b..7d78e6deac89 100644
--- a/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
+++ b/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
@@ -285,9 +285,13 @@ static int ttm_set_pages_caching(struct dma_pool *pool,
285 285
286static void __ttm_dma_free_page(struct dma_pool *pool, struct dma_page *d_page) 286static void __ttm_dma_free_page(struct dma_pool *pool, struct dma_page *d_page)
287{ 287{
288 unsigned long attrs = 0;
288 dma_addr_t dma = d_page->dma; 289 dma_addr_t dma = d_page->dma;
289 d_page->vaddr &= ~VADDR_FLAG_HUGE_POOL; 290 d_page->vaddr &= ~VADDR_FLAG_HUGE_POOL;
290 dma_free_coherent(pool->dev, pool->size, (void *)d_page->vaddr, dma); 291 if (pool->type & IS_HUGE)
292 attrs = DMA_ATTR_NO_WARN;
293
294 dma_free_attrs(pool->dev, pool->size, (void *)d_page->vaddr, dma, attrs);
291 295
292 kfree(d_page); 296 kfree(d_page);
293 d_page = NULL; 297 d_page = NULL;
diff --git a/drivers/gpu/drm/virtio/virtgpu_object.c b/drivers/gpu/drm/virtio/virtgpu_object.c
index b2da31310d24..09b526518f5a 100644
--- a/drivers/gpu/drm/virtio/virtgpu_object.c
+++ b/drivers/gpu/drm/virtio/virtgpu_object.c
@@ -204,6 +204,7 @@ int virtio_gpu_object_get_sg_table(struct virtio_gpu_device *qdev,
204 .interruptible = false, 204 .interruptible = false,
205 .no_wait_gpu = false 205 .no_wait_gpu = false
206 }; 206 };
207 size_t max_segment;
207 208
208 /* wtf swapping */ 209 /* wtf swapping */
209 if (bo->pages) 210 if (bo->pages)
@@ -215,8 +216,13 @@ int virtio_gpu_object_get_sg_table(struct virtio_gpu_device *qdev,
215 if (!bo->pages) 216 if (!bo->pages)
216 goto out; 217 goto out;
217 218
218 ret = sg_alloc_table_from_pages(bo->pages, pages, nr_pages, 0, 219 max_segment = virtio_max_dma_size(qdev->vdev);
219 nr_pages << PAGE_SHIFT, GFP_KERNEL); 220 max_segment &= PAGE_MASK;
221 if (max_segment > SCATTERLIST_MAX_SEGMENT)
222 max_segment = SCATTERLIST_MAX_SEGMENT;
223 ret = __sg_alloc_table_from_pages(bo->pages, pages, nr_pages, 0,
224 nr_pages << PAGE_SHIFT,
225 max_segment, GFP_KERNEL);
220 if (ret) 226 if (ret)
221 goto out; 227 goto out;
222 return 0; 228 return 0;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_msg.c b/drivers/gpu/drm/vmwgfx/vmwgfx_msg.c
index e4e09d47c5c0..59e9d05ab928 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_msg.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_msg.c
@@ -389,8 +389,10 @@ static int vmw_recv_msg(struct rpc_channel *channel, void **msg,
389 break; 389 break;
390 } 390 }
391 391
392 if (retries == RETRIES) 392 if (retries == RETRIES) {
393 kfree(reply);
393 return -EINVAL; 394 return -EINVAL;
395 }
394 396
395 *msg_len = reply_len; 397 *msg_len = reply_len;
396 *msg = reply; 398 *msg = reply;
diff --git a/drivers/hid/hid-a4tech.c b/drivers/hid/hid-a4tech.c
index 98bf694626f7..3a8c4a5971f7 100644
--- a/drivers/hid/hid-a4tech.c
+++ b/drivers/hid/hid-a4tech.c
@@ -23,12 +23,36 @@
23#define A4_2WHEEL_MOUSE_HACK_7 0x01 23#define A4_2WHEEL_MOUSE_HACK_7 0x01
24#define A4_2WHEEL_MOUSE_HACK_B8 0x02 24#define A4_2WHEEL_MOUSE_HACK_B8 0x02
25 25
26#define A4_WHEEL_ORIENTATION (HID_UP_GENDESK | 0x000000b8)
27
26struct a4tech_sc { 28struct a4tech_sc {
27 unsigned long quirks; 29 unsigned long quirks;
28 unsigned int hw_wheel; 30 unsigned int hw_wheel;
29 __s32 delayed_value; 31 __s32 delayed_value;
30}; 32};
31 33
34static int a4_input_mapping(struct hid_device *hdev, struct hid_input *hi,
35 struct hid_field *field, struct hid_usage *usage,
36 unsigned long **bit, int *max)
37{
38 struct a4tech_sc *a4 = hid_get_drvdata(hdev);
39
40 if (a4->quirks & A4_2WHEEL_MOUSE_HACK_B8 &&
41 usage->hid == A4_WHEEL_ORIENTATION) {
42 /*
43 * We do not want to have this usage mapped to anything as it's
44 * nonstandard and doesn't really behave like an HID report.
45 * It's only selecting the orientation (vertical/horizontal) of
46 * the previous mouse wheel report. The input_events will be
47 * generated once both reports are recorded in a4_event().
48 */
49 return -1;
50 }
51
52 return 0;
53
54}
55
32static int a4_input_mapped(struct hid_device *hdev, struct hid_input *hi, 56static int a4_input_mapped(struct hid_device *hdev, struct hid_input *hi,
33 struct hid_field *field, struct hid_usage *usage, 57 struct hid_field *field, struct hid_usage *usage,
34 unsigned long **bit, int *max) 58 unsigned long **bit, int *max)
@@ -52,8 +76,7 @@ static int a4_event(struct hid_device *hdev, struct hid_field *field,
52 struct a4tech_sc *a4 = hid_get_drvdata(hdev); 76 struct a4tech_sc *a4 = hid_get_drvdata(hdev);
53 struct input_dev *input; 77 struct input_dev *input;
54 78
55 if (!(hdev->claimed & HID_CLAIMED_INPUT) || !field->hidinput || 79 if (!(hdev->claimed & HID_CLAIMED_INPUT) || !field->hidinput)
56 !usage->type)
57 return 0; 80 return 0;
58 81
59 input = field->hidinput->input; 82 input = field->hidinput->input;
@@ -64,7 +87,7 @@ static int a4_event(struct hid_device *hdev, struct hid_field *field,
64 return 1; 87 return 1;
65 } 88 }
66 89
67 if (usage->hid == 0x000100b8) { 90 if (usage->hid == A4_WHEEL_ORIENTATION) {
68 input_event(input, EV_REL, value ? REL_HWHEEL : 91 input_event(input, EV_REL, value ? REL_HWHEEL :
69 REL_WHEEL, a4->delayed_value); 92 REL_WHEEL, a4->delayed_value);
70 input_event(input, EV_REL, value ? REL_HWHEEL_HI_RES : 93 input_event(input, EV_REL, value ? REL_HWHEEL_HI_RES :
@@ -131,6 +154,7 @@ MODULE_DEVICE_TABLE(hid, a4_devices);
131static struct hid_driver a4_driver = { 154static struct hid_driver a4_driver = {
132 .name = "a4tech", 155 .name = "a4tech",
133 .id_table = a4_devices, 156 .id_table = a4_devices,
157 .input_mapping = a4_input_mapping,
134 .input_mapped = a4_input_mapped, 158 .input_mapped = a4_input_mapped,
135 .event = a4_event, 159 .event = a4_event,
136 .probe = a4_probe, 160 .probe = a4_probe,
diff --git a/drivers/hid/hid-cp2112.c b/drivers/hid/hid-cp2112.c
index 2310c96ccf4a..db1b55df0d13 100644
--- a/drivers/hid/hid-cp2112.c
+++ b/drivers/hid/hid-cp2112.c
@@ -1153,8 +1153,6 @@ static unsigned int cp2112_gpio_irq_startup(struct irq_data *d)
1153 1153
1154 INIT_DELAYED_WORK(&dev->gpio_poll_worker, cp2112_gpio_poll_callback); 1154 INIT_DELAYED_WORK(&dev->gpio_poll_worker, cp2112_gpio_poll_callback);
1155 1155
1156 cp2112_gpio_direction_input(gc, d->hwirq);
1157
1158 if (!dev->gpio_poll) { 1156 if (!dev->gpio_poll) {
1159 dev->gpio_poll = true; 1157 dev->gpio_poll = true;
1160 schedule_delayed_work(&dev->gpio_poll_worker, 0); 1158 schedule_delayed_work(&dev->gpio_poll_worker, 0);
@@ -1204,6 +1202,12 @@ static int __maybe_unused cp2112_allocate_irq(struct cp2112_device *dev,
1204 return PTR_ERR(dev->desc[pin]); 1202 return PTR_ERR(dev->desc[pin]);
1205 } 1203 }
1206 1204
1205 ret = cp2112_gpio_direction_input(&dev->gc, pin);
1206 if (ret < 0) {
1207 dev_err(dev->gc.parent, "Failed to set GPIO to input dir\n");
1208 goto err_desc;
1209 }
1210
1207 ret = gpiochip_lock_as_irq(&dev->gc, pin); 1211 ret = gpiochip_lock_as_irq(&dev->gc, pin);
1208 if (ret) { 1212 if (ret) {
1209 dev_err(dev->gc.parent, "Failed to lock GPIO as interrupt\n"); 1213 dev_err(dev->gc.parent, "Failed to lock GPIO as interrupt\n");
diff --git a/drivers/hid/hid-holtek-kbd.c b/drivers/hid/hid-holtek-kbd.c
index b3d502421b79..0a38e8e9bc78 100644
--- a/drivers/hid/hid-holtek-kbd.c
+++ b/drivers/hid/hid-holtek-kbd.c
@@ -123,9 +123,14 @@ static int holtek_kbd_input_event(struct input_dev *dev, unsigned int type,
123 123
124 /* Locate the boot interface, to receive the LED change events */ 124 /* Locate the boot interface, to receive the LED change events */
125 struct usb_interface *boot_interface = usb_ifnum_to_if(usb_dev, 0); 125 struct usb_interface *boot_interface = usb_ifnum_to_if(usb_dev, 0);
126 struct hid_device *boot_hid;
127 struct hid_input *boot_hid_input;
126 128
127 struct hid_device *boot_hid = usb_get_intfdata(boot_interface); 129 if (unlikely(boot_interface == NULL))
128 struct hid_input *boot_hid_input = list_first_entry(&boot_hid->inputs, 130 return -ENODEV;
131
132 boot_hid = usb_get_intfdata(boot_interface);
133 boot_hid_input = list_first_entry(&boot_hid->inputs,
129 struct hid_input, list); 134 struct hid_input, list);
130 135
131 return boot_hid_input->input->event(boot_hid_input->input, type, code, 136 return boot_hid_input->input->event(boot_hid_input->input, type, code,
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
index 0d695f8e1b2c..0a00be19f7a0 100644
--- a/drivers/hid/hid-ids.h
+++ b/drivers/hid/hid-ids.h
@@ -568,6 +568,7 @@
568#define USB_PRODUCT_ID_HP_LOGITECH_OEM_USB_OPTICAL_MOUSE_0B4A 0x0b4a 568#define USB_PRODUCT_ID_HP_LOGITECH_OEM_USB_OPTICAL_MOUSE_0B4A 0x0b4a
569#define USB_PRODUCT_ID_HP_PIXART_OEM_USB_OPTICAL_MOUSE 0x134a 569#define USB_PRODUCT_ID_HP_PIXART_OEM_USB_OPTICAL_MOUSE 0x134a
570#define USB_PRODUCT_ID_HP_PIXART_OEM_USB_OPTICAL_MOUSE_094A 0x094a 570#define USB_PRODUCT_ID_HP_PIXART_OEM_USB_OPTICAL_MOUSE_094A 0x094a
571#define USB_PRODUCT_ID_HP_PIXART_OEM_USB_OPTICAL_MOUSE_0641 0x0641
571 572
572#define USB_VENDOR_ID_HUION 0x256c 573#define USB_VENDOR_ID_HUION 0x256c
573#define USB_DEVICE_ID_HUION_TABLET 0x006e 574#define USB_DEVICE_ID_HUION_TABLET 0x006e
@@ -768,7 +769,8 @@
768#define USB_DEVICE_ID_LOGITECH_NANO_RECEIVER 0xc52f 769#define USB_DEVICE_ID_LOGITECH_NANO_RECEIVER 0xc52f
769#define USB_DEVICE_ID_LOGITECH_UNIFYING_RECEIVER_2 0xc532 770#define USB_DEVICE_ID_LOGITECH_UNIFYING_RECEIVER_2 0xc532
770#define USB_DEVICE_ID_LOGITECH_NANO_RECEIVER_2 0xc534 771#define USB_DEVICE_ID_LOGITECH_NANO_RECEIVER_2 0xc534
771#define USB_DEVICE_ID_LOGITECH_NANO_RECEIVER_GAMING 0xc539 772#define USB_DEVICE_ID_LOGITECH_NANO_RECEIVER_LIGHTSPEED 0xc539
773#define USB_DEVICE_ID_LOGITECH_NANO_RECEIVER_POWERPLAY 0xc53a
772#define USB_DEVICE_ID_SPACETRAVELLER 0xc623 774#define USB_DEVICE_ID_SPACETRAVELLER 0xc623
773#define USB_DEVICE_ID_SPACENAVIGATOR 0xc626 775#define USB_DEVICE_ID_SPACENAVIGATOR 0xc626
774#define USB_DEVICE_ID_DINOVO_DESKTOP 0xc704 776#define USB_DEVICE_ID_DINOVO_DESKTOP 0xc704
@@ -989,6 +991,7 @@
989#define USB_DEVICE_ID_SAITEK_RAT7 0x0cd7 991#define USB_DEVICE_ID_SAITEK_RAT7 0x0cd7
990#define USB_DEVICE_ID_SAITEK_RAT9 0x0cfa 992#define USB_DEVICE_ID_SAITEK_RAT9 0x0cfa
991#define USB_DEVICE_ID_SAITEK_MMO7 0x0cd0 993#define USB_DEVICE_ID_SAITEK_MMO7 0x0cd0
994#define USB_DEVICE_ID_SAITEK_X52 0x075c
992 995
993#define USB_VENDOR_ID_SAMSUNG 0x0419 996#define USB_VENDOR_ID_SAMSUNG 0x0419
994#define USB_DEVICE_ID_SAMSUNG_IR_REMOTE 0x0001 997#define USB_DEVICE_ID_SAMSUNG_IR_REMOTE 0x0001
diff --git a/drivers/hid/hid-logitech-dj.c b/drivers/hid/hid-logitech-dj.c
index 6196217a7d93..cc47f948c1d0 100644
--- a/drivers/hid/hid-logitech-dj.c
+++ b/drivers/hid/hid-logitech-dj.c
@@ -1125,7 +1125,7 @@ static int logi_dj_recv_query_hidpp_devices(struct dj_receiver_dev *djrcv_dev)
1125 HID_REQ_SET_REPORT); 1125 HID_REQ_SET_REPORT);
1126 1126
1127 kfree(hidpp_report); 1127 kfree(hidpp_report);
1128 return retval; 1128 return (retval < 0) ? retval : 0;
1129} 1129}
1130 1130
1131static int logi_dj_recv_query_paired_devices(struct dj_receiver_dev *djrcv_dev) 1131static int logi_dj_recv_query_paired_devices(struct dj_receiver_dev *djrcv_dev)
@@ -1832,13 +1832,17 @@ static const struct hid_device_id logi_dj_receivers[] = {
1832 HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, 1832 HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH,
1833 USB_DEVICE_ID_LOGITECH_NANO_RECEIVER_2), 1833 USB_DEVICE_ID_LOGITECH_NANO_RECEIVER_2),
1834 .driver_data = recvr_type_hidpp}, 1834 .driver_data = recvr_type_hidpp},
1835 { /* Logitech gaming receiver (0xc539) */ 1835 { /* Logitech lightspeed receiver (0xc539) */
1836 HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, 1836 HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH,
1837 USB_DEVICE_ID_LOGITECH_NANO_RECEIVER_GAMING), 1837 USB_DEVICE_ID_LOGITECH_NANO_RECEIVER_LIGHTSPEED),
1838 .driver_data = recvr_type_gaming_hidpp}, 1838 .driver_data = recvr_type_gaming_hidpp},
1839 { /* Logitech 27 MHz HID++ 1.0 receiver (0xc513) */ 1839 { /* Logitech 27 MHz HID++ 1.0 receiver (0xc513) */
1840 HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_MX3000_RECEIVER), 1840 HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_MX3000_RECEIVER),
1841 .driver_data = recvr_type_27mhz}, 1841 .driver_data = recvr_type_27mhz},
1842 { /* Logitech powerplay receiver (0xc53a) */
1843 HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH,
1844 USB_DEVICE_ID_LOGITECH_NANO_RECEIVER_POWERPLAY),
1845 .driver_data = recvr_type_gaming_hidpp},
1842 { /* Logitech 27 MHz HID++ 1.0 receiver (0xc517) */ 1846 { /* Logitech 27 MHz HID++ 1.0 receiver (0xc517) */
1843 HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, 1847 HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH,
1844 USB_DEVICE_ID_S510_RECEIVER_2), 1848 USB_DEVICE_ID_S510_RECEIVER_2),
diff --git a/drivers/hid/hid-logitech-hidpp.c b/drivers/hid/hid-logitech-hidpp.c
index e3b6245bf4b2..0179f7ed77e5 100644
--- a/drivers/hid/hid-logitech-hidpp.c
+++ b/drivers/hid/hid-logitech-hidpp.c
@@ -3749,15 +3749,23 @@ static const struct hid_device_id hidpp_devices[] = {
3749 3749
3750 { L27MHZ_DEVICE(HID_ANY_ID) }, 3750 { L27MHZ_DEVICE(HID_ANY_ID) },
3751 3751
3752 { /* Logitech G403 Gaming Mouse over USB */ 3752 { /* Logitech G403 Wireless Gaming Mouse over USB */
3753 HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, 0xC082) }, 3753 HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, 0xC082) },
3754 { /* Logitech G700 Gaming Mouse over USB */ 3754 { /* Logitech G703 Gaming Mouse over USB */
3755 HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, 0xC06B) }, 3755 HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, 0xC087) },
3756 { /* Logitech G703 Hero Gaming Mouse over USB */
3757 HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, 0xC090) },
3756 { /* Logitech G900 Gaming Mouse over USB */ 3758 { /* Logitech G900 Gaming Mouse over USB */
3757 HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, 0xC081) }, 3759 HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, 0xC081) },
3760 { /* Logitech G903 Gaming Mouse over USB */
3761 HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, 0xC086) },
3762 { /* Logitech G903 Hero Gaming Mouse over USB */
3763 HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, 0xC091) },
3758 { /* Logitech G920 Wheel over USB */ 3764 { /* Logitech G920 Wheel over USB */
3759 HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_G920_WHEEL), 3765 HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_G920_WHEEL),
3760 .driver_data = HIDPP_QUIRK_CLASS_G920 | HIDPP_QUIRK_FORCE_OUTPUT_REPORTS}, 3766 .driver_data = HIDPP_QUIRK_CLASS_G920 | HIDPP_QUIRK_FORCE_OUTPUT_REPORTS},
3767 { /* Logitech G Pro Gaming Mouse over USB */
3768 HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, 0xC088) },
3761 3769
3762 { /* MX5000 keyboard over Bluetooth */ 3770 { /* MX5000 keyboard over Bluetooth */
3763 HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LOGITECH, 0xb305), 3771 HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LOGITECH, 0xb305),
diff --git a/drivers/hid/hid-quirks.c b/drivers/hid/hid-quirks.c
index 185a577c46f6..166f41f3173b 100644
--- a/drivers/hid/hid-quirks.c
+++ b/drivers/hid/hid-quirks.c
@@ -92,6 +92,7 @@ static const struct hid_device_id hid_quirks[] = {
92 { HID_USB_DEVICE(USB_VENDOR_ID_HP, USB_PRODUCT_ID_HP_LOGITECH_OEM_USB_OPTICAL_MOUSE_0B4A), HID_QUIRK_ALWAYS_POLL }, 92 { HID_USB_DEVICE(USB_VENDOR_ID_HP, USB_PRODUCT_ID_HP_LOGITECH_OEM_USB_OPTICAL_MOUSE_0B4A), HID_QUIRK_ALWAYS_POLL },
93 { HID_USB_DEVICE(USB_VENDOR_ID_HP, USB_PRODUCT_ID_HP_PIXART_OEM_USB_OPTICAL_MOUSE), HID_QUIRK_ALWAYS_POLL }, 93 { HID_USB_DEVICE(USB_VENDOR_ID_HP, USB_PRODUCT_ID_HP_PIXART_OEM_USB_OPTICAL_MOUSE), HID_QUIRK_ALWAYS_POLL },
94 { HID_USB_DEVICE(USB_VENDOR_ID_HP, USB_PRODUCT_ID_HP_PIXART_OEM_USB_OPTICAL_MOUSE_094A), HID_QUIRK_ALWAYS_POLL }, 94 { HID_USB_DEVICE(USB_VENDOR_ID_HP, USB_PRODUCT_ID_HP_PIXART_OEM_USB_OPTICAL_MOUSE_094A), HID_QUIRK_ALWAYS_POLL },
95 { HID_USB_DEVICE(USB_VENDOR_ID_HP, USB_PRODUCT_ID_HP_PIXART_OEM_USB_OPTICAL_MOUSE_0641), HID_QUIRK_ALWAYS_POLL },
95 { HID_USB_DEVICE(USB_VENDOR_ID_IDEACOM, USB_DEVICE_ID_IDEACOM_IDC6680), HID_QUIRK_MULTI_INPUT }, 96 { HID_USB_DEVICE(USB_VENDOR_ID_IDEACOM, USB_DEVICE_ID_IDEACOM_IDC6680), HID_QUIRK_MULTI_INPUT },
96 { HID_USB_DEVICE(USB_VENDOR_ID_INNOMEDIA, USB_DEVICE_ID_INNEX_GENESIS_ATARI), HID_QUIRK_MULTI_INPUT }, 97 { HID_USB_DEVICE(USB_VENDOR_ID_INNOMEDIA, USB_DEVICE_ID_INNEX_GENESIS_ATARI), HID_QUIRK_MULTI_INPUT },
97 { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_EASYPEN_M610X), HID_QUIRK_MULTI_INPUT }, 98 { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_EASYPEN_M610X), HID_QUIRK_MULTI_INPUT },
@@ -141,6 +142,7 @@ static const struct hid_device_id hid_quirks[] = {
141 { HID_USB_DEVICE(USB_VENDOR_ID_RETROUSB, USB_DEVICE_ID_RETROUSB_SNES_RETROPAD), HID_QUIRK_INCREMENT_USAGE_ON_DUPLICATE }, 142 { HID_USB_DEVICE(USB_VENDOR_ID_RETROUSB, USB_DEVICE_ID_RETROUSB_SNES_RETROPAD), HID_QUIRK_INCREMENT_USAGE_ON_DUPLICATE },
142 { HID_USB_DEVICE(USB_VENDOR_ID_RETROUSB, USB_DEVICE_ID_RETROUSB_SNES_RETROPORT), HID_QUIRK_INCREMENT_USAGE_ON_DUPLICATE }, 143 { HID_USB_DEVICE(USB_VENDOR_ID_RETROUSB, USB_DEVICE_ID_RETROUSB_SNES_RETROPORT), HID_QUIRK_INCREMENT_USAGE_ON_DUPLICATE },
143 { HID_USB_DEVICE(USB_VENDOR_ID_SAITEK, USB_DEVICE_ID_SAITEK_RUMBLEPAD), HID_QUIRK_BADPAD }, 144 { HID_USB_DEVICE(USB_VENDOR_ID_SAITEK, USB_DEVICE_ID_SAITEK_RUMBLEPAD), HID_QUIRK_BADPAD },
145 { HID_USB_DEVICE(USB_VENDOR_ID_SAITEK, USB_DEVICE_ID_SAITEK_X52), HID_QUIRK_INCREMENT_USAGE_ON_DUPLICATE },
144 { HID_USB_DEVICE(USB_VENDOR_ID_SEMICO, USB_DEVICE_ID_SEMICO_USB_KEYKOARD2), HID_QUIRK_NO_INIT_REPORTS }, 146 { HID_USB_DEVICE(USB_VENDOR_ID_SEMICO, USB_DEVICE_ID_SEMICO_USB_KEYKOARD2), HID_QUIRK_NO_INIT_REPORTS },
145 { HID_USB_DEVICE(USB_VENDOR_ID_SEMICO, USB_DEVICE_ID_SEMICO_USB_KEYKOARD), HID_QUIRK_NO_INIT_REPORTS }, 147 { HID_USB_DEVICE(USB_VENDOR_ID_SEMICO, USB_DEVICE_ID_SEMICO_USB_KEYKOARD), HID_QUIRK_NO_INIT_REPORTS },
146 { HID_USB_DEVICE(USB_VENDOR_ID_SENNHEISER, USB_DEVICE_ID_SENNHEISER_BTD500USB), HID_QUIRK_NOGET }, 148 { HID_USB_DEVICE(USB_VENDOR_ID_SENNHEISER, USB_DEVICE_ID_SENNHEISER_BTD500USB), HID_QUIRK_NOGET },
diff --git a/drivers/hid/hid-sony.c b/drivers/hid/hid-sony.c
index 93942063b51b..49dd2d905c7f 100644
--- a/drivers/hid/hid-sony.c
+++ b/drivers/hid/hid-sony.c
@@ -585,10 +585,14 @@ static void sony_set_leds(struct sony_sc *sc);
585static inline void sony_schedule_work(struct sony_sc *sc, 585static inline void sony_schedule_work(struct sony_sc *sc,
586 enum sony_worker which) 586 enum sony_worker which)
587{ 587{
588 unsigned long flags;
589
588 switch (which) { 590 switch (which) {
589 case SONY_WORKER_STATE: 591 case SONY_WORKER_STATE:
590 if (!sc->defer_initialization) 592 spin_lock_irqsave(&sc->lock, flags);
593 if (!sc->defer_initialization && sc->state_worker_initialized)
591 schedule_work(&sc->state_worker); 594 schedule_work(&sc->state_worker);
595 spin_unlock_irqrestore(&sc->lock, flags);
592 break; 596 break;
593 case SONY_WORKER_HOTPLUG: 597 case SONY_WORKER_HOTPLUG:
594 if (sc->hotplug_worker_initialized) 598 if (sc->hotplug_worker_initialized)
@@ -2558,13 +2562,18 @@ static inline void sony_init_output_report(struct sony_sc *sc,
2558 2562
2559static inline void sony_cancel_work_sync(struct sony_sc *sc) 2563static inline void sony_cancel_work_sync(struct sony_sc *sc)
2560{ 2564{
2565 unsigned long flags;
2566
2561 if (sc->hotplug_worker_initialized) 2567 if (sc->hotplug_worker_initialized)
2562 cancel_work_sync(&sc->hotplug_worker); 2568 cancel_work_sync(&sc->hotplug_worker);
2563 if (sc->state_worker_initialized) 2569 if (sc->state_worker_initialized) {
2570 spin_lock_irqsave(&sc->lock, flags);
2571 sc->state_worker_initialized = 0;
2572 spin_unlock_irqrestore(&sc->lock, flags);
2564 cancel_work_sync(&sc->state_worker); 2573 cancel_work_sync(&sc->state_worker);
2574 }
2565} 2575}
2566 2576
2567
2568static int sony_input_configured(struct hid_device *hdev, 2577static int sony_input_configured(struct hid_device *hdev,
2569 struct hid_input *hidinput) 2578 struct hid_input *hidinput)
2570{ 2579{
diff --git a/drivers/hid/hid-tmff.c b/drivers/hid/hid-tmff.c
index e12f2588ddeb..bdfc5ff3b2c5 100644
--- a/drivers/hid/hid-tmff.c
+++ b/drivers/hid/hid-tmff.c
@@ -22,6 +22,8 @@
22 22
23#include "hid-ids.h" 23#include "hid-ids.h"
24 24
25#define THRUSTMASTER_DEVICE_ID_2_IN_1_DT 0xb320
26
25static const signed short ff_rumble[] = { 27static const signed short ff_rumble[] = {
26 FF_RUMBLE, 28 FF_RUMBLE,
27 -1 29 -1
@@ -76,6 +78,7 @@ static int tmff_play(struct input_dev *dev, void *data,
76 struct hid_field *ff_field = tmff->ff_field; 78 struct hid_field *ff_field = tmff->ff_field;
77 int x, y; 79 int x, y;
78 int left, right; /* Rumbling */ 80 int left, right; /* Rumbling */
81 int motor_swap;
79 82
80 switch (effect->type) { 83 switch (effect->type) {
81 case FF_CONSTANT: 84 case FF_CONSTANT:
@@ -100,6 +103,13 @@ static int tmff_play(struct input_dev *dev, void *data,
100 ff_field->logical_minimum, 103 ff_field->logical_minimum,
101 ff_field->logical_maximum); 104 ff_field->logical_maximum);
102 105
106 /* 2-in-1 strong motor is left */
107 if (hid->product == THRUSTMASTER_DEVICE_ID_2_IN_1_DT) {
108 motor_swap = left;
109 left = right;
110 right = motor_swap;
111 }
112
103 dbg_hid("(left,right)=(%08x, %08x)\n", left, right); 113 dbg_hid("(left,right)=(%08x, %08x)\n", left, right);
104 ff_field->value[0] = left; 114 ff_field->value[0] = left;
105 ff_field->value[1] = right; 115 ff_field->value[1] = right;
@@ -226,6 +236,8 @@ static const struct hid_device_id tm_devices[] = {
226 .driver_data = (unsigned long)ff_rumble }, 236 .driver_data = (unsigned long)ff_rumble },
227 { HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb304), /* FireStorm Dual Power 2 (and 3) */ 237 { HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb304), /* FireStorm Dual Power 2 (and 3) */
228 .driver_data = (unsigned long)ff_rumble }, 238 .driver_data = (unsigned long)ff_rumble },
239 { HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, THRUSTMASTER_DEVICE_ID_2_IN_1_DT), /* Dual Trigger 2-in-1 */
240 .driver_data = (unsigned long)ff_rumble },
229 { HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb323), /* Dual Trigger 3-in-1 (PC Mode) */ 241 { HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb323), /* Dual Trigger 3-in-1 (PC Mode) */
230 .driver_data = (unsigned long)ff_rumble }, 242 .driver_data = (unsigned long)ff_rumble },
231 { HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb324), /* Dual Trigger 3-in-1 (PS3 Mode) */ 243 { HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb324), /* Dual Trigger 3-in-1 (PS3 Mode) */
diff --git a/drivers/hid/intel-ish-hid/ipc/hw-ish.h b/drivers/hid/intel-ish-hid/ipc/hw-ish.h
index 1065692f90e2..5792a104000a 100644
--- a/drivers/hid/intel-ish-hid/ipc/hw-ish.h
+++ b/drivers/hid/intel-ish-hid/ipc/hw-ish.h
@@ -24,6 +24,7 @@
24#define ICL_MOBILE_DEVICE_ID 0x34FC 24#define ICL_MOBILE_DEVICE_ID 0x34FC
25#define SPT_H_DEVICE_ID 0xA135 25#define SPT_H_DEVICE_ID 0xA135
26#define CML_LP_DEVICE_ID 0x02FC 26#define CML_LP_DEVICE_ID 0x02FC
27#define EHL_Ax_DEVICE_ID 0x4BB3
27 28
28#define REVISION_ID_CHT_A0 0x6 29#define REVISION_ID_CHT_A0 0x6
29#define REVISION_ID_CHT_Ax_SI 0x0 30#define REVISION_ID_CHT_Ax_SI 0x0
diff --git a/drivers/hid/intel-ish-hid/ipc/pci-ish.c b/drivers/hid/intel-ish-hid/ipc/pci-ish.c
index aa80b4d3b740..279567baca3d 100644
--- a/drivers/hid/intel-ish-hid/ipc/pci-ish.c
+++ b/drivers/hid/intel-ish-hid/ipc/pci-ish.c
@@ -33,6 +33,7 @@ static const struct pci_device_id ish_pci_tbl[] = {
33 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, ICL_MOBILE_DEVICE_ID)}, 33 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, ICL_MOBILE_DEVICE_ID)},
34 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, SPT_H_DEVICE_ID)}, 34 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, SPT_H_DEVICE_ID)},
35 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, CML_LP_DEVICE_ID)}, 35 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, CML_LP_DEVICE_ID)},
36 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, EHL_Ax_DEVICE_ID)},
36 {0, } 37 {0, }
37}; 38};
38MODULE_DEVICE_TABLE(pci, ish_pci_tbl); 39MODULE_DEVICE_TABLE(pci, ish_pci_tbl);
diff --git a/drivers/hid/usbhid/hiddev.c b/drivers/hid/usbhid/hiddev.c
index 55b72573066b..4e11cc6fc34b 100644
--- a/drivers/hid/usbhid/hiddev.c
+++ b/drivers/hid/usbhid/hiddev.c
@@ -284,6 +284,14 @@ static int hiddev_open(struct inode *inode, struct file *file)
284 spin_unlock_irq(&list->hiddev->list_lock); 284 spin_unlock_irq(&list->hiddev->list_lock);
285 285
286 mutex_lock(&hiddev->existancelock); 286 mutex_lock(&hiddev->existancelock);
287 /*
288 * recheck exist with existance lock held to
289 * avoid opening a disconnected device
290 */
291 if (!list->hiddev->exist) {
292 res = -ENODEV;
293 goto bail_unlock;
294 }
287 if (!list->hiddev->open++) 295 if (!list->hiddev->open++)
288 if (list->hiddev->exist) { 296 if (list->hiddev->exist) {
289 struct hid_device *hid = hiddev->hid; 297 struct hid_device *hid = hiddev->hid;
@@ -300,6 +308,10 @@ bail_normal_power:
300 hid_hw_power(hid, PM_HINT_NORMAL); 308 hid_hw_power(hid, PM_HINT_NORMAL);
301bail_unlock: 309bail_unlock:
302 mutex_unlock(&hiddev->existancelock); 310 mutex_unlock(&hiddev->existancelock);
311
312 spin_lock_irq(&list->hiddev->list_lock);
313 list_del(&list->node);
314 spin_unlock_irq(&list->hiddev->list_lock);
303bail: 315bail:
304 file->private_data = NULL; 316 file->private_data = NULL;
305 vfree(list); 317 vfree(list);
diff --git a/drivers/hid/wacom_wac.c b/drivers/hid/wacom_wac.c
index 8fc36a28081b..1713235d28cb 100644
--- a/drivers/hid/wacom_wac.c
+++ b/drivers/hid/wacom_wac.c
@@ -533,14 +533,14 @@ static int wacom_intuos_pad(struct wacom_wac *wacom)
533 */ 533 */
534 buttons = (data[4] << 1) | (data[3] & 0x01); 534 buttons = (data[4] << 1) | (data[3] & 0x01);
535 } else if (features->type == CINTIQ_COMPANION_2) { 535 } else if (features->type == CINTIQ_COMPANION_2) {
536 /* d-pad right -> data[4] & 0x10 536 /* d-pad right -> data[2] & 0x10
537 * d-pad up -> data[4] & 0x20 537 * d-pad up -> data[2] & 0x20
538 * d-pad left -> data[4] & 0x40 538 * d-pad left -> data[2] & 0x40
539 * d-pad down -> data[4] & 0x80 539 * d-pad down -> data[2] & 0x80
540 * d-pad center -> data[3] & 0x01 540 * d-pad center -> data[1] & 0x01
541 */ 541 */
542 buttons = ((data[2] >> 4) << 7) | 542 buttons = ((data[2] >> 4) << 7) |
543 ((data[1] & 0x04) << 6) | 543 ((data[1] & 0x04) << 4) |
544 ((data[2] & 0x0F) << 2) | 544 ((data[2] & 0x0F) << 2) |
545 (data[1] & 0x03); 545 (data[1] & 0x03);
546 } else if (features->type >= INTUOS5S && features->type <= INTUOSPL) { 546 } else if (features->type >= INTUOS5S && features->type <= INTUOSPL) {
@@ -846,6 +846,8 @@ static int wacom_intuos_general(struct wacom_wac *wacom)
846 y >>= 1; 846 y >>= 1;
847 distance >>= 1; 847 distance >>= 1;
848 } 848 }
849 if (features->type == INTUOSHT2)
850 distance = features->distance_max - distance;
849 input_report_abs(input, ABS_X, x); 851 input_report_abs(input, ABS_X, x);
850 input_report_abs(input, ABS_Y, y); 852 input_report_abs(input, ABS_Y, y);
851 input_report_abs(input, ABS_DISTANCE, distance); 853 input_report_abs(input, ABS_DISTANCE, distance);
@@ -1059,7 +1061,7 @@ static int wacom_remote_irq(struct wacom_wac *wacom_wac, size_t len)
1059 input_report_key(input, BTN_BASE2, (data[11] & 0x02)); 1061 input_report_key(input, BTN_BASE2, (data[11] & 0x02));
1060 1062
1061 if (data[12] & 0x80) 1063 if (data[12] & 0x80)
1062 input_report_abs(input, ABS_WHEEL, (data[12] & 0x7f)); 1064 input_report_abs(input, ABS_WHEEL, (data[12] & 0x7f) - 1);
1063 else 1065 else
1064 input_report_abs(input, ABS_WHEEL, 0); 1066 input_report_abs(input, ABS_WHEEL, 0);
1065 1067
@@ -1290,7 +1292,8 @@ static void wacom_intuos_pro2_bt_pen(struct wacom_wac *wacom)
1290 } 1292 }
1291 if (wacom->tool[0]) { 1293 if (wacom->tool[0]) {
1292 input_report_abs(pen_input, ABS_PRESSURE, get_unaligned_le16(&frame[5])); 1294 input_report_abs(pen_input, ABS_PRESSURE, get_unaligned_le16(&frame[5]));
1293 if (wacom->features.type == INTUOSP2_BT) { 1295 if (wacom->features.type == INTUOSP2_BT ||
1296 wacom->features.type == INTUOSP2S_BT) {
1294 input_report_abs(pen_input, ABS_DISTANCE, 1297 input_report_abs(pen_input, ABS_DISTANCE,
1295 range ? frame[13] : wacom->features.distance_max); 1298 range ? frame[13] : wacom->features.distance_max);
1296 } else { 1299 } else {
diff --git a/drivers/hv/channel.c b/drivers/hv/channel.c
index 5f9505a087f6..23f358cb7f49 100644
--- a/drivers/hv/channel.c
+++ b/drivers/hv/channel.c
@@ -26,7 +26,7 @@
26 26
27static unsigned long virt_to_hvpfn(void *addr) 27static unsigned long virt_to_hvpfn(void *addr)
28{ 28{
29 unsigned long paddr; 29 phys_addr_t paddr;
30 30
31 if (is_vmalloc_addr(addr)) 31 if (is_vmalloc_addr(addr))
32 paddr = page_to_phys(vmalloc_to_page(addr)) + 32 paddr = page_to_phys(vmalloc_to_page(addr)) +
diff --git a/drivers/hv/hv_trace.h b/drivers/hv/hv_trace.h
index 999f80a63bff..e70783e33680 100644
--- a/drivers/hv/hv_trace.h
+++ b/drivers/hv/hv_trace.h
@@ -1,4 +1,4 @@
1// SPDX-License-Identifier: GPL-2.0 1/* SPDX-License-Identifier: GPL-2.0 */
2 2
3#undef TRACE_SYSTEM 3#undef TRACE_SYSTEM
4#define TRACE_SYSTEM hyperv 4#define TRACE_SYSTEM hyperv
diff --git a/drivers/hv/hyperv_vmbus.h b/drivers/hv/hyperv_vmbus.h
index 362e70e9d145..fb16a622e8ab 100644
--- a/drivers/hv/hyperv_vmbus.h
+++ b/drivers/hv/hyperv_vmbus.h
@@ -146,8 +146,6 @@ struct hv_context {
146 */ 146 */
147 u64 guestid; 147 u64 guestid;
148 148
149 void *tsc_page;
150
151 struct hv_per_cpu_context __percpu *cpu_context; 149 struct hv_per_cpu_context __percpu *cpu_context;
152 150
153 /* 151 /*
diff --git a/drivers/hwmon/lm75.c b/drivers/hwmon/lm75.c
index 3fb9c0a2d6d0..ce5ec403ec73 100644
--- a/drivers/hwmon/lm75.c
+++ b/drivers/hwmon/lm75.c
@@ -343,7 +343,7 @@ lm75_probe(struct i2c_client *client, const struct i2c_device_id *id)
343 data->sample_time = MSEC_PER_SEC / 2; 343 data->sample_time = MSEC_PER_SEC / 2;
344 break; 344 break;
345 case tmp75b: /* not one-shot mode, Conversion rate 37Hz */ 345 case tmp75b: /* not one-shot mode, Conversion rate 37Hz */
346 clr_mask |= 1 << 15 | 0x3 << 13; 346 clr_mask |= 1 << 7 | 0x3 << 5;
347 data->resolution = 12; 347 data->resolution = 12;
348 data->sample_time = MSEC_PER_SEC / 37; 348 data->sample_time = MSEC_PER_SEC / 37;
349 break; 349 break;
diff --git a/drivers/hwmon/nct6775.c b/drivers/hwmon/nct6775.c
index e7dff5febe16..d42bc0883a32 100644
--- a/drivers/hwmon/nct6775.c
+++ b/drivers/hwmon/nct6775.c
@@ -852,7 +852,7 @@ static const u16 NCT6106_REG_TARGET[] = { 0x111, 0x121, 0x131 };
852static const u16 NCT6106_REG_WEIGHT_TEMP_SEL[] = { 0x168, 0x178, 0x188 }; 852static const u16 NCT6106_REG_WEIGHT_TEMP_SEL[] = { 0x168, 0x178, 0x188 };
853static const u16 NCT6106_REG_WEIGHT_TEMP_STEP[] = { 0x169, 0x179, 0x189 }; 853static const u16 NCT6106_REG_WEIGHT_TEMP_STEP[] = { 0x169, 0x179, 0x189 };
854static const u16 NCT6106_REG_WEIGHT_TEMP_STEP_TOL[] = { 0x16a, 0x17a, 0x18a }; 854static const u16 NCT6106_REG_WEIGHT_TEMP_STEP_TOL[] = { 0x16a, 0x17a, 0x18a };
855static const u16 NCT6106_REG_WEIGHT_DUTY_STEP[] = { 0x16b, 0x17b, 0x17c }; 855static const u16 NCT6106_REG_WEIGHT_DUTY_STEP[] = { 0x16b, 0x17b, 0x18b };
856static const u16 NCT6106_REG_WEIGHT_TEMP_BASE[] = { 0x16c, 0x17c, 0x18c }; 856static const u16 NCT6106_REG_WEIGHT_TEMP_BASE[] = { 0x16c, 0x17c, 0x18c };
857static const u16 NCT6106_REG_WEIGHT_DUTY_BASE[] = { 0x16d, 0x17d, 0x18d }; 857static const u16 NCT6106_REG_WEIGHT_DUTY_BASE[] = { 0x16d, 0x17d, 0x18d };
858 858
@@ -3764,6 +3764,7 @@ static int nct6775_probe(struct platform_device *pdev)
3764 data->REG_FAN_TIME[0] = NCT6106_REG_FAN_STOP_TIME; 3764 data->REG_FAN_TIME[0] = NCT6106_REG_FAN_STOP_TIME;
3765 data->REG_FAN_TIME[1] = NCT6106_REG_FAN_STEP_UP_TIME; 3765 data->REG_FAN_TIME[1] = NCT6106_REG_FAN_STEP_UP_TIME;
3766 data->REG_FAN_TIME[2] = NCT6106_REG_FAN_STEP_DOWN_TIME; 3766 data->REG_FAN_TIME[2] = NCT6106_REG_FAN_STEP_DOWN_TIME;
3767 data->REG_TOLERANCE_H = NCT6106_REG_TOLERANCE_H;
3767 data->REG_PWM[0] = NCT6106_REG_PWM; 3768 data->REG_PWM[0] = NCT6106_REG_PWM;
3768 data->REG_PWM[1] = NCT6106_REG_FAN_START_OUTPUT; 3769 data->REG_PWM[1] = NCT6106_REG_FAN_START_OUTPUT;
3769 data->REG_PWM[2] = NCT6106_REG_FAN_STOP_OUTPUT; 3770 data->REG_PWM[2] = NCT6106_REG_FAN_STOP_OUTPUT;
diff --git a/drivers/hwmon/nct7802.c b/drivers/hwmon/nct7802.c
index ec7bcf8d7cd6..f3dd2a17bd42 100644
--- a/drivers/hwmon/nct7802.c
+++ b/drivers/hwmon/nct7802.c
@@ -704,7 +704,7 @@ static struct attribute *nct7802_in_attrs[] = {
704 &sensor_dev_attr_in3_alarm.dev_attr.attr, 704 &sensor_dev_attr_in3_alarm.dev_attr.attr,
705 &sensor_dev_attr_in3_beep.dev_attr.attr, 705 &sensor_dev_attr_in3_beep.dev_attr.attr,
706 706
707 &sensor_dev_attr_in4_input.dev_attr.attr, /* 17 */ 707 &sensor_dev_attr_in4_input.dev_attr.attr, /* 16 */
708 &sensor_dev_attr_in4_min.dev_attr.attr, 708 &sensor_dev_attr_in4_min.dev_attr.attr,
709 &sensor_dev_attr_in4_max.dev_attr.attr, 709 &sensor_dev_attr_in4_max.dev_attr.attr,
710 &sensor_dev_attr_in4_alarm.dev_attr.attr, 710 &sensor_dev_attr_in4_alarm.dev_attr.attr,
@@ -730,9 +730,9 @@ static umode_t nct7802_in_is_visible(struct kobject *kobj,
730 730
731 if (index >= 6 && index < 11 && (reg & 0x03) != 0x03) /* VSEN1 */ 731 if (index >= 6 && index < 11 && (reg & 0x03) != 0x03) /* VSEN1 */
732 return 0; 732 return 0;
733 if (index >= 11 && index < 17 && (reg & 0x0c) != 0x0c) /* VSEN2 */ 733 if (index >= 11 && index < 16 && (reg & 0x0c) != 0x0c) /* VSEN2 */
734 return 0; 734 return 0;
735 if (index >= 17 && (reg & 0x30) != 0x30) /* VSEN3 */ 735 if (index >= 16 && (reg & 0x30) != 0x30) /* VSEN3 */
736 return 0; 736 return 0;
737 737
738 return attr->mode; 738 return attr->mode;
diff --git a/drivers/hwmon/occ/common.c b/drivers/hwmon/occ/common.c
index a7d2b16dd702..30e18eb60da7 100644
--- a/drivers/hwmon/occ/common.c
+++ b/drivers/hwmon/occ/common.c
@@ -408,8 +408,10 @@ static ssize_t occ_show_power_1(struct device *dev,
408 408
409static u64 occ_get_powr_avg(u64 *accum, u32 *samples) 409static u64 occ_get_powr_avg(u64 *accum, u32 *samples)
410{ 410{
411 return div64_u64(get_unaligned_be64(accum) * 1000000ULL, 411 u64 divisor = get_unaligned_be32(samples);
412 get_unaligned_be32(samples)); 412
413 return (divisor == 0) ? 0 :
414 div64_u64(get_unaligned_be64(accum) * 1000000ULL, divisor);
413} 415}
414 416
415static ssize_t occ_show_power_2(struct device *dev, 417static ssize_t occ_show_power_2(struct device *dev,
diff --git a/drivers/hwtracing/coresight/coresight-etm-perf.c b/drivers/hwtracing/coresight/coresight-etm-perf.c
index 5c1ca0df5cb0..84f1dcb69827 100644
--- a/drivers/hwtracing/coresight/coresight-etm-perf.c
+++ b/drivers/hwtracing/coresight/coresight-etm-perf.c
@@ -544,6 +544,7 @@ int etm_perf_add_symlink_sink(struct coresight_device *csdev)
544 /* See function coresight_get_sink_by_id() to know where this is used */ 544 /* See function coresight_get_sink_by_id() to know where this is used */
545 hash = hashlen_hash(hashlen_string(NULL, name)); 545 hash = hashlen_hash(hashlen_string(NULL, name));
546 546
547 sysfs_attr_init(&ea->attr.attr);
547 ea->attr.attr.name = devm_kstrdup(dev, name, GFP_KERNEL); 548 ea->attr.attr.name = devm_kstrdup(dev, name, GFP_KERNEL);
548 if (!ea->attr.attr.name) 549 if (!ea->attr.attr.name)
549 return -ENOMEM; 550 return -ENOMEM;
diff --git a/drivers/hwtracing/intel_th/msu.h b/drivers/hwtracing/intel_th/msu.h
index 574c16004cb2..13d9b141daaa 100644
--- a/drivers/hwtracing/intel_th/msu.h
+++ b/drivers/hwtracing/intel_th/msu.h
@@ -1,4 +1,4 @@
1// SPDX-License-Identifier: GPL-2.0 1/* SPDX-License-Identifier: GPL-2.0 */
2/* 2/*
3 * Intel(R) Trace Hub Memory Storage Unit (MSU) data structures 3 * Intel(R) Trace Hub Memory Storage Unit (MSU) data structures
4 * 4 *
diff --git a/drivers/hwtracing/intel_th/pci.c b/drivers/hwtracing/intel_th/pci.c
index c0378c3de9a4..91dfeba62485 100644
--- a/drivers/hwtracing/intel_th/pci.c
+++ b/drivers/hwtracing/intel_th/pci.c
@@ -165,6 +165,11 @@ static const struct pci_device_id intel_th_pci_id_table[] = {
165 .driver_data = (kernel_ulong_t)0, 165 .driver_data = (kernel_ulong_t)0,
166 }, 166 },
167 { 167 {
168 /* Lewisburg PCH */
169 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xa226),
170 .driver_data = (kernel_ulong_t)0,
171 },
172 {
168 /* Gemini Lake */ 173 /* Gemini Lake */
169 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x318e), 174 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x318e),
170 .driver_data = (kernel_ulong_t)&intel_th_2x, 175 .driver_data = (kernel_ulong_t)&intel_th_2x,
@@ -199,6 +204,11 @@ static const struct pci_device_id intel_th_pci_id_table[] = {
199 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x45c5), 204 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x45c5),
200 .driver_data = (kernel_ulong_t)&intel_th_2x, 205 .driver_data = (kernel_ulong_t)&intel_th_2x,
201 }, 206 },
207 {
208 /* Tiger Lake PCH */
209 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xa0a6),
210 .driver_data = (kernel_ulong_t)&intel_th_2x,
211 },
202 { 0 }, 212 { 0 },
203}; 213};
204 214
diff --git a/drivers/hwtracing/intel_th/pti.h b/drivers/hwtracing/intel_th/pti.h
index e9381babc84c..7dfc0431333b 100644
--- a/drivers/hwtracing/intel_th/pti.h
+++ b/drivers/hwtracing/intel_th/pti.h
@@ -1,4 +1,4 @@
1// SPDX-License-Identifier: GPL-2.0 1/* SPDX-License-Identifier: GPL-2.0 */
2/* 2/*
3 * Intel(R) Trace Hub PTI output data structures 3 * Intel(R) Trace Hub PTI output data structures
4 * 4 *
diff --git a/drivers/hwtracing/stm/core.c b/drivers/hwtracing/stm/core.c
index e55b902560de..181e7ff1ec4f 100644
--- a/drivers/hwtracing/stm/core.c
+++ b/drivers/hwtracing/stm/core.c
@@ -1276,7 +1276,6 @@ int stm_source_register_device(struct device *parent,
1276 1276
1277err: 1277err:
1278 put_device(&src->dev); 1278 put_device(&src->dev);
1279 kfree(src);
1280 1279
1281 return err; 1280 return err;
1282} 1281}
diff --git a/drivers/i2c/busses/i2c-at91-core.c b/drivers/i2c/busses/i2c-at91-core.c
index 8d55cdd69ff4..435c7d7377a3 100644
--- a/drivers/i2c/busses/i2c-at91-core.c
+++ b/drivers/i2c/busses/i2c-at91-core.c
@@ -142,7 +142,7 @@ static struct at91_twi_pdata sama5d4_config = {
142 142
143static struct at91_twi_pdata sama5d2_config = { 143static struct at91_twi_pdata sama5d2_config = {
144 .clk_max_div = 7, 144 .clk_max_div = 7,
145 .clk_offset = 4, 145 .clk_offset = 3,
146 .has_unre_flag = true, 146 .has_unre_flag = true,
147 .has_alt_cmd = true, 147 .has_alt_cmd = true,
148 .has_hold_field = true, 148 .has_hold_field = true,
diff --git a/drivers/i2c/busses/i2c-at91-master.c b/drivers/i2c/busses/i2c-at91-master.c
index e87232f2e708..a3fcc35ffd3b 100644
--- a/drivers/i2c/busses/i2c-at91-master.c
+++ b/drivers/i2c/busses/i2c-at91-master.c
@@ -122,9 +122,11 @@ static void at91_twi_write_next_byte(struct at91_twi_dev *dev)
122 writeb_relaxed(*dev->buf, dev->base + AT91_TWI_THR); 122 writeb_relaxed(*dev->buf, dev->base + AT91_TWI_THR);
123 123
124 /* send stop when last byte has been written */ 124 /* send stop when last byte has been written */
125 if (--dev->buf_len == 0) 125 if (--dev->buf_len == 0) {
126 if (!dev->use_alt_cmd) 126 if (!dev->use_alt_cmd)
127 at91_twi_write(dev, AT91_TWI_CR, AT91_TWI_STOP); 127 at91_twi_write(dev, AT91_TWI_CR, AT91_TWI_STOP);
128 at91_twi_write(dev, AT91_TWI_IDR, AT91_TWI_TXRDY);
129 }
128 130
129 dev_dbg(dev->dev, "wrote 0x%x, to go %zu\n", *dev->buf, dev->buf_len); 131 dev_dbg(dev->dev, "wrote 0x%x, to go %zu\n", *dev->buf, dev->buf_len);
130 132
@@ -542,9 +544,8 @@ static int at91_do_twi_transfer(struct at91_twi_dev *dev)
542 } else { 544 } else {
543 at91_twi_write_next_byte(dev); 545 at91_twi_write_next_byte(dev);
544 at91_twi_write(dev, AT91_TWI_IER, 546 at91_twi_write(dev, AT91_TWI_IER,
545 AT91_TWI_TXCOMP | 547 AT91_TWI_TXCOMP | AT91_TWI_NACK |
546 AT91_TWI_NACK | 548 (dev->buf_len ? AT91_TWI_TXRDY : 0));
547 AT91_TWI_TXRDY);
548 } 549 }
549 } 550 }
550 551
diff --git a/drivers/i2c/busses/i2c-bcm-iproc.c b/drivers/i2c/busses/i2c-bcm-iproc.c
index 2c7f145a036e..19ef2b0c682a 100644
--- a/drivers/i2c/busses/i2c-bcm-iproc.c
+++ b/drivers/i2c/busses/i2c-bcm-iproc.c
@@ -392,16 +392,18 @@ static bool bcm_iproc_i2c_slave_isr(struct bcm_iproc_i2c_dev *iproc_i2c,
392static void bcm_iproc_i2c_read_valid_bytes(struct bcm_iproc_i2c_dev *iproc_i2c) 392static void bcm_iproc_i2c_read_valid_bytes(struct bcm_iproc_i2c_dev *iproc_i2c)
393{ 393{
394 struct i2c_msg *msg = iproc_i2c->msg; 394 struct i2c_msg *msg = iproc_i2c->msg;
395 uint32_t val;
395 396
396 /* Read valid data from RX FIFO */ 397 /* Read valid data from RX FIFO */
397 while (iproc_i2c->rx_bytes < msg->len) { 398 while (iproc_i2c->rx_bytes < msg->len) {
398 if (!((iproc_i2c_rd_reg(iproc_i2c, M_FIFO_CTRL_OFFSET) >> M_FIFO_RX_CNT_SHIFT) 399 val = iproc_i2c_rd_reg(iproc_i2c, M_RX_OFFSET);
399 & M_FIFO_RX_CNT_MASK)) 400
401 /* rx fifo empty */
402 if (!((val >> M_RX_STATUS_SHIFT) & M_RX_STATUS_MASK))
400 break; 403 break;
401 404
402 msg->buf[iproc_i2c->rx_bytes] = 405 msg->buf[iproc_i2c->rx_bytes] =
403 (iproc_i2c_rd_reg(iproc_i2c, M_RX_OFFSET) >> 406 (val >> M_RX_DATA_SHIFT) & M_RX_DATA_MASK;
404 M_RX_DATA_SHIFT) & M_RX_DATA_MASK;
405 iproc_i2c->rx_bytes++; 407 iproc_i2c->rx_bytes++;
406 } 408 }
407} 409}
@@ -788,7 +790,10 @@ static int bcm_iproc_i2c_xfer(struct i2c_adapter *adapter,
788 790
789static uint32_t bcm_iproc_i2c_functionality(struct i2c_adapter *adap) 791static uint32_t bcm_iproc_i2c_functionality(struct i2c_adapter *adap)
790{ 792{
791 u32 val = I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL; 793 u32 val;
794
795 /* We do not support the SMBUS Quick command */
796 val = I2C_FUNC_I2C | (I2C_FUNC_SMBUS_EMUL & ~I2C_FUNC_SMBUS_QUICK);
792 797
793 if (adap->algo->reg_slave) 798 if (adap->algo->reg_slave)
794 val |= I2C_FUNC_SLAVE; 799 val |= I2C_FUNC_SLAVE;
diff --git a/drivers/i2c/busses/i2c-designware-slave.c b/drivers/i2c/busses/i2c-designware-slave.c
index e7f9305b2dd9..f5f001738df5 100644
--- a/drivers/i2c/busses/i2c-designware-slave.c
+++ b/drivers/i2c/busses/i2c-designware-slave.c
@@ -94,6 +94,7 @@ static int i2c_dw_unreg_slave(struct i2c_client *slave)
94 94
95 dev->disable_int(dev); 95 dev->disable_int(dev);
96 dev->disable(dev); 96 dev->disable(dev);
97 synchronize_irq(dev->irq);
97 dev->slave = NULL; 98 dev->slave = NULL;
98 pm_runtime_put(dev->dev); 99 pm_runtime_put(dev->dev);
99 100
diff --git a/drivers/i2c/busses/i2c-emev2.c b/drivers/i2c/busses/i2c-emev2.c
index 35b302d983e0..959d4912ec0d 100644
--- a/drivers/i2c/busses/i2c-emev2.c
+++ b/drivers/i2c/busses/i2c-emev2.c
@@ -69,6 +69,7 @@ struct em_i2c_device {
69 struct completion msg_done; 69 struct completion msg_done;
70 struct clk *sclk; 70 struct clk *sclk;
71 struct i2c_client *slave; 71 struct i2c_client *slave;
72 int irq;
72}; 73};
73 74
74static inline void em_clear_set_bit(struct em_i2c_device *priv, u8 clear, u8 set, u8 reg) 75static inline void em_clear_set_bit(struct em_i2c_device *priv, u8 clear, u8 set, u8 reg)
@@ -339,6 +340,12 @@ static int em_i2c_unreg_slave(struct i2c_client *slave)
339 340
340 writeb(0, priv->base + I2C_OFS_SVA0); 341 writeb(0, priv->base + I2C_OFS_SVA0);
341 342
343 /*
344 * Wait for interrupt to finish. New slave irqs cannot happen because we
345 * cleared the slave address and, thus, only extension codes will be
346 * detected which do not use the slave ptr.
347 */
348 synchronize_irq(priv->irq);
342 priv->slave = NULL; 349 priv->slave = NULL;
343 350
344 return 0; 351 return 0;
@@ -355,7 +362,7 @@ static int em_i2c_probe(struct platform_device *pdev)
355{ 362{
356 struct em_i2c_device *priv; 363 struct em_i2c_device *priv;
357 struct resource *r; 364 struct resource *r;
358 int irq, ret; 365 int ret;
359 366
360 priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL); 367 priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
361 if (!priv) 368 if (!priv)
@@ -390,8 +397,8 @@ static int em_i2c_probe(struct platform_device *pdev)
390 397
391 em_i2c_reset(&priv->adap); 398 em_i2c_reset(&priv->adap);
392 399
393 irq = platform_get_irq(pdev, 0); 400 priv->irq = platform_get_irq(pdev, 0);
394 ret = devm_request_irq(&pdev->dev, irq, em_i2c_irq_handler, 0, 401 ret = devm_request_irq(&pdev->dev, priv->irq, em_i2c_irq_handler, 0,
395 "em_i2c", priv); 402 "em_i2c", priv);
396 if (ret) 403 if (ret)
397 goto err_clk; 404 goto err_clk;
@@ -401,7 +408,8 @@ static int em_i2c_probe(struct platform_device *pdev)
401 if (ret) 408 if (ret)
402 goto err_clk; 409 goto err_clk;
403 410
404 dev_info(&pdev->dev, "Added i2c controller %d, irq %d\n", priv->adap.nr, irq); 411 dev_info(&pdev->dev, "Added i2c controller %d, irq %d\n", priv->adap.nr,
412 priv->irq);
405 413
406 return 0; 414 return 0;
407 415
diff --git a/drivers/i2c/busses/i2c-i801.c b/drivers/i2c/busses/i2c-i801.c
index f2956936c3f2..2e08b4722dc4 100644
--- a/drivers/i2c/busses/i2c-i801.c
+++ b/drivers/i2c/busses/i2c-i801.c
@@ -1194,19 +1194,28 @@ static acpi_status check_acpi_smo88xx_device(acpi_handle obj_handle,
1194 int i; 1194 int i;
1195 1195
1196 status = acpi_get_object_info(obj_handle, &info); 1196 status = acpi_get_object_info(obj_handle, &info);
1197 if (!ACPI_SUCCESS(status) || !(info->valid & ACPI_VALID_HID)) 1197 if (ACPI_FAILURE(status))
1198 return AE_OK; 1198 return AE_OK;
1199 1199
1200 if (!(info->valid & ACPI_VALID_HID))
1201 goto smo88xx_not_found;
1202
1200 hid = info->hardware_id.string; 1203 hid = info->hardware_id.string;
1201 if (!hid) 1204 if (!hid)
1202 return AE_OK; 1205 goto smo88xx_not_found;
1203 1206
1204 i = match_string(acpi_smo8800_ids, ARRAY_SIZE(acpi_smo8800_ids), hid); 1207 i = match_string(acpi_smo8800_ids, ARRAY_SIZE(acpi_smo8800_ids), hid);
1205 if (i < 0) 1208 if (i < 0)
1206 return AE_OK; 1209 goto smo88xx_not_found;
1210
1211 kfree(info);
1207 1212
1208 *((bool *)return_value) = true; 1213 *((bool *)return_value) = true;
1209 return AE_CTRL_TERMINATE; 1214 return AE_CTRL_TERMINATE;
1215
1216smo88xx_not_found:
1217 kfree(info);
1218 return AE_OK;
1210} 1219}
1211 1220
1212static bool is_dell_system_with_lis3lv02d(void) 1221static bool is_dell_system_with_lis3lv02d(void)
diff --git a/drivers/i2c/busses/i2c-imx.c b/drivers/i2c/busses/i2c-imx.c
index b1b8b938d7f4..15f6cde6452f 100644
--- a/drivers/i2c/busses/i2c-imx.c
+++ b/drivers/i2c/busses/i2c-imx.c
@@ -273,8 +273,8 @@ static inline unsigned char imx_i2c_read_reg(struct imx_i2c_struct *i2c_imx,
273} 273}
274 274
275/* Functions for DMA support */ 275/* Functions for DMA support */
276static int i2c_imx_dma_request(struct imx_i2c_struct *i2c_imx, 276static void i2c_imx_dma_request(struct imx_i2c_struct *i2c_imx,
277 dma_addr_t phy_addr) 277 dma_addr_t phy_addr)
278{ 278{
279 struct imx_i2c_dma *dma; 279 struct imx_i2c_dma *dma;
280 struct dma_slave_config dma_sconfig; 280 struct dma_slave_config dma_sconfig;
@@ -283,7 +283,7 @@ static int i2c_imx_dma_request(struct imx_i2c_struct *i2c_imx,
283 283
284 dma = devm_kzalloc(dev, sizeof(*dma), GFP_KERNEL); 284 dma = devm_kzalloc(dev, sizeof(*dma), GFP_KERNEL);
285 if (!dma) 285 if (!dma)
286 return -ENOMEM; 286 return;
287 287
288 dma->chan_tx = dma_request_chan(dev, "tx"); 288 dma->chan_tx = dma_request_chan(dev, "tx");
289 if (IS_ERR(dma->chan_tx)) { 289 if (IS_ERR(dma->chan_tx)) {
@@ -328,7 +328,7 @@ static int i2c_imx_dma_request(struct imx_i2c_struct *i2c_imx,
328 dev_info(dev, "using %s (tx) and %s (rx) for DMA transfers\n", 328 dev_info(dev, "using %s (tx) and %s (rx) for DMA transfers\n",
329 dma_chan_name(dma->chan_tx), dma_chan_name(dma->chan_rx)); 329 dma_chan_name(dma->chan_tx), dma_chan_name(dma->chan_rx));
330 330
331 return 0; 331 return;
332 332
333fail_rx: 333fail_rx:
334 dma_release_channel(dma->chan_rx); 334 dma_release_channel(dma->chan_rx);
@@ -336,8 +336,6 @@ fail_tx:
336 dma_release_channel(dma->chan_tx); 336 dma_release_channel(dma->chan_tx);
337fail_al: 337fail_al:
338 devm_kfree(dev, dma); 338 devm_kfree(dev, dma);
339 /* return successfully if there is no dma support */
340 return ret == -ENODEV ? 0 : ret;
341} 339}
342 340
343static void i2c_imx_dma_callback(void *arg) 341static void i2c_imx_dma_callback(void *arg)
@@ -1165,17 +1163,13 @@ static int i2c_imx_probe(struct platform_device *pdev)
1165 dev_dbg(&i2c_imx->adapter.dev, "device resources: %pR\n", res); 1163 dev_dbg(&i2c_imx->adapter.dev, "device resources: %pR\n", res);
1166 dev_dbg(&i2c_imx->adapter.dev, "adapter name: \"%s\"\n", 1164 dev_dbg(&i2c_imx->adapter.dev, "adapter name: \"%s\"\n",
1167 i2c_imx->adapter.name); 1165 i2c_imx->adapter.name);
1166 dev_info(&i2c_imx->adapter.dev, "IMX I2C adapter registered\n");
1168 1167
1169 /* Init DMA config if supported */ 1168 /* Init DMA config if supported */
1170 ret = i2c_imx_dma_request(i2c_imx, phy_addr); 1169 i2c_imx_dma_request(i2c_imx, phy_addr);
1171 if (ret < 0)
1172 goto del_adapter;
1173 1170
1174 dev_info(&i2c_imx->adapter.dev, "IMX I2C adapter registered\n");
1175 return 0; /* Return OK */ 1171 return 0; /* Return OK */
1176 1172
1177del_adapter:
1178 i2c_del_adapter(&i2c_imx->adapter);
1179clk_notifier_unregister: 1173clk_notifier_unregister:
1180 clk_notifier_unregister(i2c_imx->clk, &i2c_imx->clk_change_nb); 1174 clk_notifier_unregister(i2c_imx->clk, &i2c_imx->clk_change_nb);
1181rpm_disable: 1175rpm_disable:
diff --git a/drivers/i2c/busses/i2c-mt65xx.c b/drivers/i2c/busses/i2c-mt65xx.c
index 252edb433fdf..29eae1bf4f86 100644
--- a/drivers/i2c/busses/i2c-mt65xx.c
+++ b/drivers/i2c/busses/i2c-mt65xx.c
@@ -234,6 +234,10 @@ static const struct i2c_adapter_quirks mt7622_i2c_quirks = {
234 .max_num_msgs = 255, 234 .max_num_msgs = 255,
235}; 235};
236 236
237static const struct i2c_adapter_quirks mt8183_i2c_quirks = {
238 .flags = I2C_AQ_NO_ZERO_LEN,
239};
240
237static const struct mtk_i2c_compatible mt2712_compat = { 241static const struct mtk_i2c_compatible mt2712_compat = {
238 .regs = mt_i2c_regs_v1, 242 .regs = mt_i2c_regs_v1,
239 .pmic_i2c = 0, 243 .pmic_i2c = 0,
@@ -298,6 +302,7 @@ static const struct mtk_i2c_compatible mt8173_compat = {
298}; 302};
299 303
300static const struct mtk_i2c_compatible mt8183_compat = { 304static const struct mtk_i2c_compatible mt8183_compat = {
305 .quirks = &mt8183_i2c_quirks,
301 .regs = mt_i2c_regs_v2, 306 .regs = mt_i2c_regs_v2,
302 .pmic_i2c = 0, 307 .pmic_i2c = 0,
303 .dcm = 0, 308 .dcm = 0,
@@ -870,7 +875,11 @@ static irqreturn_t mtk_i2c_irq(int irqno, void *dev_id)
870 875
871static u32 mtk_i2c_functionality(struct i2c_adapter *adap) 876static u32 mtk_i2c_functionality(struct i2c_adapter *adap)
872{ 877{
873 return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL; 878 if (adap->quirks->flags & I2C_AQ_NO_ZERO_LEN)
879 return I2C_FUNC_I2C |
880 (I2C_FUNC_SMBUS_EMUL & ~I2C_FUNC_SMBUS_QUICK);
881 else
882 return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
874} 883}
875 884
876static const struct i2c_algorithm mtk_i2c_algorithm = { 885static const struct i2c_algorithm mtk_i2c_algorithm = {
diff --git a/drivers/i2c/busses/i2c-nvidia-gpu.c b/drivers/i2c/busses/i2c-nvidia-gpu.c
index cfc76b5de726..5a1235fd86bb 100644
--- a/drivers/i2c/busses/i2c-nvidia-gpu.c
+++ b/drivers/i2c/busses/i2c-nvidia-gpu.c
@@ -364,7 +364,7 @@ static void gpu_i2c_remove(struct pci_dev *pdev)
364/* 364/*
365 * We need gpu_i2c_suspend() even if it is stub, for runtime pm to work 365 * We need gpu_i2c_suspend() even if it is stub, for runtime pm to work
366 * correctly. Without it, lspci shows runtime pm status as "D0" for the card. 366 * correctly. Without it, lspci shows runtime pm status as "D0" for the card.
367 * Documentation/power/pci.txt also insists for driver to provide this. 367 * Documentation/power/pci.rst also insists for driver to provide this.
368 */ 368 */
369static __maybe_unused int gpu_i2c_suspend(struct device *dev) 369static __maybe_unused int gpu_i2c_suspend(struct device *dev)
370{ 370{
diff --git a/drivers/i2c/busses/i2c-piix4.c b/drivers/i2c/busses/i2c-piix4.c
index c46c4bddc7ca..cba325eb852f 100644
--- a/drivers/i2c/busses/i2c-piix4.c
+++ b/drivers/i2c/busses/i2c-piix4.c
@@ -91,7 +91,7 @@
91#define SB800_PIIX4_PORT_IDX_MASK 0x06 91#define SB800_PIIX4_PORT_IDX_MASK 0x06
92#define SB800_PIIX4_PORT_IDX_SHIFT 1 92#define SB800_PIIX4_PORT_IDX_SHIFT 1
93 93
94/* On kerncz, SmBus0Sel is at bit 20:19 of PMx00 DecodeEn */ 94/* On kerncz and Hudson2, SmBus0Sel is at bit 20:19 of PMx00 DecodeEn */
95#define SB800_PIIX4_PORT_IDX_KERNCZ 0x02 95#define SB800_PIIX4_PORT_IDX_KERNCZ 0x02
96#define SB800_PIIX4_PORT_IDX_MASK_KERNCZ 0x18 96#define SB800_PIIX4_PORT_IDX_MASK_KERNCZ 0x18
97#define SB800_PIIX4_PORT_IDX_SHIFT_KERNCZ 3 97#define SB800_PIIX4_PORT_IDX_SHIFT_KERNCZ 3
@@ -358,18 +358,16 @@ static int piix4_setup_sb800(struct pci_dev *PIIX4_dev,
358 /* Find which register is used for port selection */ 358 /* Find which register is used for port selection */
359 if (PIIX4_dev->vendor == PCI_VENDOR_ID_AMD || 359 if (PIIX4_dev->vendor == PCI_VENDOR_ID_AMD ||
360 PIIX4_dev->vendor == PCI_VENDOR_ID_HYGON) { 360 PIIX4_dev->vendor == PCI_VENDOR_ID_HYGON) {
361 switch (PIIX4_dev->device) { 361 if (PIIX4_dev->device == PCI_DEVICE_ID_AMD_KERNCZ_SMBUS ||
362 case PCI_DEVICE_ID_AMD_KERNCZ_SMBUS: 362 (PIIX4_dev->device == PCI_DEVICE_ID_AMD_HUDSON2_SMBUS &&
363 PIIX4_dev->revision >= 0x1F)) {
363 piix4_port_sel_sb800 = SB800_PIIX4_PORT_IDX_KERNCZ; 364 piix4_port_sel_sb800 = SB800_PIIX4_PORT_IDX_KERNCZ;
364 piix4_port_mask_sb800 = SB800_PIIX4_PORT_IDX_MASK_KERNCZ; 365 piix4_port_mask_sb800 = SB800_PIIX4_PORT_IDX_MASK_KERNCZ;
365 piix4_port_shift_sb800 = SB800_PIIX4_PORT_IDX_SHIFT_KERNCZ; 366 piix4_port_shift_sb800 = SB800_PIIX4_PORT_IDX_SHIFT_KERNCZ;
366 break; 367 } else {
367 case PCI_DEVICE_ID_AMD_HUDSON2_SMBUS:
368 default:
369 piix4_port_sel_sb800 = SB800_PIIX4_PORT_IDX_ALT; 368 piix4_port_sel_sb800 = SB800_PIIX4_PORT_IDX_ALT;
370 piix4_port_mask_sb800 = SB800_PIIX4_PORT_IDX_MASK; 369 piix4_port_mask_sb800 = SB800_PIIX4_PORT_IDX_MASK;
371 piix4_port_shift_sb800 = SB800_PIIX4_PORT_IDX_SHIFT; 370 piix4_port_shift_sb800 = SB800_PIIX4_PORT_IDX_SHIFT;
372 break;
373 } 371 }
374 } else { 372 } else {
375 if (!request_muxed_region(SB800_PIIX4_SMB_IDX, 2, 373 if (!request_muxed_region(SB800_PIIX4_SMB_IDX, 2,
diff --git a/drivers/i2c/busses/i2c-rcar.c b/drivers/i2c/busses/i2c-rcar.c
index d39a4606f72d..531c01100b56 100644
--- a/drivers/i2c/busses/i2c-rcar.c
+++ b/drivers/i2c/busses/i2c-rcar.c
@@ -139,6 +139,7 @@ struct rcar_i2c_priv {
139 enum dma_data_direction dma_direction; 139 enum dma_data_direction dma_direction;
140 140
141 struct reset_control *rstc; 141 struct reset_control *rstc;
142 int irq;
142}; 143};
143 144
144#define rcar_i2c_priv_to_dev(p) ((p)->adap.dev.parent) 145#define rcar_i2c_priv_to_dev(p) ((p)->adap.dev.parent)
@@ -861,9 +862,11 @@ static int rcar_unreg_slave(struct i2c_client *slave)
861 862
862 WARN_ON(!priv->slave); 863 WARN_ON(!priv->slave);
863 864
865 /* disable irqs and ensure none is running before clearing ptr */
864 rcar_i2c_write(priv, ICSIER, 0); 866 rcar_i2c_write(priv, ICSIER, 0);
865 rcar_i2c_write(priv, ICSCR, 0); 867 rcar_i2c_write(priv, ICSCR, 0);
866 868
869 synchronize_irq(priv->irq);
867 priv->slave = NULL; 870 priv->slave = NULL;
868 871
869 pm_runtime_put(rcar_i2c_priv_to_dev(priv)); 872 pm_runtime_put(rcar_i2c_priv_to_dev(priv));
@@ -918,7 +921,7 @@ static int rcar_i2c_probe(struct platform_device *pdev)
918 struct i2c_adapter *adap; 921 struct i2c_adapter *adap;
919 struct device *dev = &pdev->dev; 922 struct device *dev = &pdev->dev;
920 struct i2c_timings i2c_t; 923 struct i2c_timings i2c_t;
921 int irq, ret; 924 int ret;
922 925
923 /* Otherwise logic will break because some bytes must always use PIO */ 926 /* Otherwise logic will break because some bytes must always use PIO */
924 BUILD_BUG_ON_MSG(RCAR_MIN_DMA_LEN < 3, "Invalid min DMA length"); 927 BUILD_BUG_ON_MSG(RCAR_MIN_DMA_LEN < 3, "Invalid min DMA length");
@@ -984,10 +987,10 @@ static int rcar_i2c_probe(struct platform_device *pdev)
984 pm_runtime_put(dev); 987 pm_runtime_put(dev);
985 988
986 989
987 irq = platform_get_irq(pdev, 0); 990 priv->irq = platform_get_irq(pdev, 0);
988 ret = devm_request_irq(dev, irq, rcar_i2c_irq, 0, dev_name(dev), priv); 991 ret = devm_request_irq(dev, priv->irq, rcar_i2c_irq, 0, dev_name(dev), priv);
989 if (ret < 0) { 992 if (ret < 0) {
990 dev_err(dev, "cannot get irq %d\n", irq); 993 dev_err(dev, "cannot get irq %d\n", priv->irq);
991 goto out_pm_disable; 994 goto out_pm_disable;
992 } 995 }
993 996
diff --git a/drivers/i2c/busses/i2c-s3c2410.c b/drivers/i2c/busses/i2c-s3c2410.c
index d97fb857b0ea..c98ef4c4a0c9 100644
--- a/drivers/i2c/busses/i2c-s3c2410.c
+++ b/drivers/i2c/busses/i2c-s3c2410.c
@@ -435,6 +435,7 @@ static int i2c_s3c_irq_nextbyte(struct s3c24xx_i2c *i2c, unsigned long iicstat)
435 * fall through to the write state, as we will need to 435 * fall through to the write state, as we will need to
436 * send a byte as well 436 * send a byte as well
437 */ 437 */
438 /* Fall through */
438 439
439 case STATE_WRITE: 440 case STATE_WRITE:
440 /* 441 /*
diff --git a/drivers/i2c/busses/i2c-stm32.h b/drivers/i2c/busses/i2c-stm32.h
index 868755f82f88..2c21893905a3 100644
--- a/drivers/i2c/busses/i2c-stm32.h
+++ b/drivers/i2c/busses/i2c-stm32.h
@@ -1,4 +1,4 @@
1// SPDX-License-Identifier: GPL-2.0 1/* SPDX-License-Identifier: GPL-2.0 */
2/* 2/*
3 * i2c-stm32.h 3 * i2c-stm32.h
4 * 4 *
diff --git a/drivers/i2c/i2c-core-base.c b/drivers/i2c/i2c-core-base.c
index f26ed495d384..9c440fa6a3dd 100644
--- a/drivers/i2c/i2c-core-base.c
+++ b/drivers/i2c/i2c-core-base.c
@@ -832,7 +832,7 @@ EXPORT_SYMBOL_GPL(i2c_new_device);
832 */ 832 */
833void i2c_unregister_device(struct i2c_client *client) 833void i2c_unregister_device(struct i2c_client *client)
834{ 834{
835 if (!client) 835 if (IS_ERR_OR_NULL(client))
836 return; 836 return;
837 837
838 if (client->dev.of_node) { 838 if (client->dev.of_node) {
diff --git a/drivers/iio/accel/cros_ec_accel_legacy.c b/drivers/iio/accel/cros_ec_accel_legacy.c
index 46bb2e421bb9..ad19d9c716f4 100644
--- a/drivers/iio/accel/cros_ec_accel_legacy.c
+++ b/drivers/iio/accel/cros_ec_accel_legacy.c
@@ -319,7 +319,6 @@ static const struct iio_chan_spec_ext_info cros_ec_accel_legacy_ext_info[] = {
319 .modified = 1, \ 319 .modified = 1, \
320 .info_mask_separate = \ 320 .info_mask_separate = \
321 BIT(IIO_CHAN_INFO_RAW) | \ 321 BIT(IIO_CHAN_INFO_RAW) | \
322 BIT(IIO_CHAN_INFO_SCALE) | \
323 BIT(IIO_CHAN_INFO_CALIBBIAS), \ 322 BIT(IIO_CHAN_INFO_CALIBBIAS), \
324 .info_mask_shared_by_all = BIT(IIO_CHAN_INFO_SCALE), \ 323 .info_mask_shared_by_all = BIT(IIO_CHAN_INFO_SCALE), \
325 .ext_info = cros_ec_accel_legacy_ext_info, \ 324 .ext_info = cros_ec_accel_legacy_ext_info, \
diff --git a/drivers/iio/adc/ingenic-adc.c b/drivers/iio/adc/ingenic-adc.c
index 92b1d5037ac9..e234970b7150 100644
--- a/drivers/iio/adc/ingenic-adc.c
+++ b/drivers/iio/adc/ingenic-adc.c
@@ -11,6 +11,7 @@
11#include <linux/iio/iio.h> 11#include <linux/iio/iio.h>
12#include <linux/io.h> 12#include <linux/io.h>
13#include <linux/iopoll.h> 13#include <linux/iopoll.h>
14#include <linux/kernel.h>
14#include <linux/module.h> 15#include <linux/module.h>
15#include <linux/mutex.h> 16#include <linux/mutex.h>
16#include <linux/platform_device.h> 17#include <linux/platform_device.h>
@@ -22,8 +23,11 @@
22#define JZ_ADC_REG_ADTCH 0x18 23#define JZ_ADC_REG_ADTCH 0x18
23#define JZ_ADC_REG_ADBDAT 0x1c 24#define JZ_ADC_REG_ADBDAT 0x1c
24#define JZ_ADC_REG_ADSDAT 0x20 25#define JZ_ADC_REG_ADSDAT 0x20
26#define JZ_ADC_REG_ADCLK 0x28
25 27
26#define JZ_ADC_REG_CFG_BAT_MD BIT(4) 28#define JZ_ADC_REG_CFG_BAT_MD BIT(4)
29#define JZ_ADC_REG_ADCLK_CLKDIV_LSB 0
30#define JZ_ADC_REG_ADCLK_CLKDIV10US_LSB 16
27 31
28#define JZ_ADC_AUX_VREF 3300 32#define JZ_ADC_AUX_VREF 3300
29#define JZ_ADC_AUX_VREF_BITS 12 33#define JZ_ADC_AUX_VREF_BITS 12
@@ -34,6 +38,8 @@
34#define JZ4740_ADC_BATTERY_HIGH_VREF (7500 * 0.986) 38#define JZ4740_ADC_BATTERY_HIGH_VREF (7500 * 0.986)
35#define JZ4740_ADC_BATTERY_HIGH_VREF_BITS 12 39#define JZ4740_ADC_BATTERY_HIGH_VREF_BITS 12
36 40
41struct ingenic_adc;
42
37struct ingenic_adc_soc_data { 43struct ingenic_adc_soc_data {
38 unsigned int battery_high_vref; 44 unsigned int battery_high_vref;
39 unsigned int battery_high_vref_bits; 45 unsigned int battery_high_vref_bits;
@@ -41,6 +47,7 @@ struct ingenic_adc_soc_data {
41 size_t battery_raw_avail_size; 47 size_t battery_raw_avail_size;
42 const int *battery_scale_avail; 48 const int *battery_scale_avail;
43 size_t battery_scale_avail_size; 49 size_t battery_scale_avail_size;
50 int (*init_clk_div)(struct device *dev, struct ingenic_adc *adc);
44}; 51};
45 52
46struct ingenic_adc { 53struct ingenic_adc {
@@ -151,6 +158,42 @@ static const int jz4740_adc_battery_scale_avail[] = {
151 JZ_ADC_BATTERY_LOW_VREF, JZ_ADC_BATTERY_LOW_VREF_BITS, 158 JZ_ADC_BATTERY_LOW_VREF, JZ_ADC_BATTERY_LOW_VREF_BITS,
152}; 159};
153 160
161static int jz4725b_adc_init_clk_div(struct device *dev, struct ingenic_adc *adc)
162{
163 struct clk *parent_clk;
164 unsigned long parent_rate, rate;
165 unsigned int div_main, div_10us;
166
167 parent_clk = clk_get_parent(adc->clk);
168 if (!parent_clk) {
169 dev_err(dev, "ADC clock has no parent\n");
170 return -ENODEV;
171 }
172 parent_rate = clk_get_rate(parent_clk);
173
174 /*
175 * The JZ4725B ADC works at 500 kHz to 8 MHz.
176 * We pick the highest rate possible.
177 * In practice we typically get 6 MHz, half of the 12 MHz EXT clock.
178 */
179 div_main = DIV_ROUND_UP(parent_rate, 8000000);
180 div_main = clamp(div_main, 1u, 64u);
181 rate = parent_rate / div_main;
182 if (rate < 500000 || rate > 8000000) {
183 dev_err(dev, "No valid divider for ADC main clock\n");
184 return -EINVAL;
185 }
186
187 /* We also need a divider that produces a 10us clock. */
188 div_10us = DIV_ROUND_UP(rate, 100000);
189
190 writel(((div_10us - 1) << JZ_ADC_REG_ADCLK_CLKDIV10US_LSB) |
191 (div_main - 1) << JZ_ADC_REG_ADCLK_CLKDIV_LSB,
192 adc->base + JZ_ADC_REG_ADCLK);
193
194 return 0;
195}
196
154static const struct ingenic_adc_soc_data jz4725b_adc_soc_data = { 197static const struct ingenic_adc_soc_data jz4725b_adc_soc_data = {
155 .battery_high_vref = JZ4725B_ADC_BATTERY_HIGH_VREF, 198 .battery_high_vref = JZ4725B_ADC_BATTERY_HIGH_VREF,
156 .battery_high_vref_bits = JZ4725B_ADC_BATTERY_HIGH_VREF_BITS, 199 .battery_high_vref_bits = JZ4725B_ADC_BATTERY_HIGH_VREF_BITS,
@@ -158,6 +201,7 @@ static const struct ingenic_adc_soc_data jz4725b_adc_soc_data = {
158 .battery_raw_avail_size = ARRAY_SIZE(jz4725b_adc_battery_raw_avail), 201 .battery_raw_avail_size = ARRAY_SIZE(jz4725b_adc_battery_raw_avail),
159 .battery_scale_avail = jz4725b_adc_battery_scale_avail, 202 .battery_scale_avail = jz4725b_adc_battery_scale_avail,
160 .battery_scale_avail_size = ARRAY_SIZE(jz4725b_adc_battery_scale_avail), 203 .battery_scale_avail_size = ARRAY_SIZE(jz4725b_adc_battery_scale_avail),
204 .init_clk_div = jz4725b_adc_init_clk_div,
161}; 205};
162 206
163static const struct ingenic_adc_soc_data jz4740_adc_soc_data = { 207static const struct ingenic_adc_soc_data jz4740_adc_soc_data = {
@@ -167,6 +211,7 @@ static const struct ingenic_adc_soc_data jz4740_adc_soc_data = {
167 .battery_raw_avail_size = ARRAY_SIZE(jz4740_adc_battery_raw_avail), 211 .battery_raw_avail_size = ARRAY_SIZE(jz4740_adc_battery_raw_avail),
168 .battery_scale_avail = jz4740_adc_battery_scale_avail, 212 .battery_scale_avail = jz4740_adc_battery_scale_avail,
169 .battery_scale_avail_size = ARRAY_SIZE(jz4740_adc_battery_scale_avail), 213 .battery_scale_avail_size = ARRAY_SIZE(jz4740_adc_battery_scale_avail),
214 .init_clk_div = NULL, /* no ADCLK register on JZ4740 */
170}; 215};
171 216
172static int ingenic_adc_read_avail(struct iio_dev *iio_dev, 217static int ingenic_adc_read_avail(struct iio_dev *iio_dev,
@@ -317,6 +362,15 @@ static int ingenic_adc_probe(struct platform_device *pdev)
317 return ret; 362 return ret;
318 } 363 }
319 364
365 /* Set clock dividers. */
366 if (soc_data->init_clk_div) {
367 ret = soc_data->init_clk_div(dev, adc);
368 if (ret) {
369 clk_disable_unprepare(adc->clk);
370 return ret;
371 }
372 }
373
320 /* Put hardware in a known passive state. */ 374 /* Put hardware in a known passive state. */
321 writeb(0x00, adc->base + JZ_ADC_REG_ENABLE); 375 writeb(0x00, adc->base + JZ_ADC_REG_ENABLE);
322 writeb(0xff, adc->base + JZ_ADC_REG_CTRL); 376 writeb(0xff, adc->base + JZ_ADC_REG_CTRL);
diff --git a/drivers/iio/adc/max9611.c b/drivers/iio/adc/max9611.c
index 917223d5ff5b..da073d72f649 100644
--- a/drivers/iio/adc/max9611.c
+++ b/drivers/iio/adc/max9611.c
@@ -83,7 +83,7 @@
83#define MAX9611_TEMP_MAX_POS 0x7f80 83#define MAX9611_TEMP_MAX_POS 0x7f80
84#define MAX9611_TEMP_MAX_NEG 0xff80 84#define MAX9611_TEMP_MAX_NEG 0xff80
85#define MAX9611_TEMP_MIN_NEG 0xd980 85#define MAX9611_TEMP_MIN_NEG 0xd980
86#define MAX9611_TEMP_MASK GENMASK(7, 15) 86#define MAX9611_TEMP_MASK GENMASK(15, 7)
87#define MAX9611_TEMP_SHIFT 0x07 87#define MAX9611_TEMP_SHIFT 0x07
88#define MAX9611_TEMP_RAW(_r) ((_r) >> MAX9611_TEMP_SHIFT) 88#define MAX9611_TEMP_RAW(_r) ((_r) >> MAX9611_TEMP_SHIFT)
89#define MAX9611_TEMP_SCALE_NUM 1000000 89#define MAX9611_TEMP_SCALE_NUM 1000000
@@ -480,7 +480,7 @@ static int max9611_init(struct max9611_dev *max9611)
480 if (ret) 480 if (ret)
481 return ret; 481 return ret;
482 482
483 regval = ret & MAX9611_TEMP_MASK; 483 regval &= MAX9611_TEMP_MASK;
484 484
485 if ((regval > MAX9611_TEMP_MAX_POS && 485 if ((regval > MAX9611_TEMP_MAX_POS &&
486 regval < MAX9611_TEMP_MIN_NEG) || 486 regval < MAX9611_TEMP_MIN_NEG) ||
diff --git a/drivers/iio/adc/rcar-gyroadc.c b/drivers/iio/adc/rcar-gyroadc.c
index 2d685730f867..c37f201294b2 100644
--- a/drivers/iio/adc/rcar-gyroadc.c
+++ b/drivers/iio/adc/rcar-gyroadc.c
@@ -382,7 +382,7 @@ static int rcar_gyroadc_parse_subdevs(struct iio_dev *indio_dev)
382 dev_err(dev, 382 dev_err(dev,
383 "Only %i channels supported with %pOFn, but reg = <%i>.\n", 383 "Only %i channels supported with %pOFn, but reg = <%i>.\n",
384 num_channels, child, reg); 384 num_channels, child, reg);
385 return ret; 385 return -EINVAL;
386 } 386 }
387 } 387 }
388 388
@@ -391,7 +391,7 @@ static int rcar_gyroadc_parse_subdevs(struct iio_dev *indio_dev)
391 dev_err(dev, 391 dev_err(dev,
392 "Channel %i uses different ADC mode than the rest.\n", 392 "Channel %i uses different ADC mode than the rest.\n",
393 reg); 393 reg);
394 return ret; 394 return -EINVAL;
395 } 395 }
396 396
397 /* Channel is valid, grab the regulator. */ 397 /* Channel is valid, grab the regulator. */
diff --git a/drivers/iio/frequency/adf4371.c b/drivers/iio/frequency/adf4371.c
index e48f15cc9ab5..ff82863cbf42 100644
--- a/drivers/iio/frequency/adf4371.c
+++ b/drivers/iio/frequency/adf4371.c
@@ -276,11 +276,11 @@ static int adf4371_set_freq(struct adf4371_state *st, unsigned long long freq,
276 st->buf[0] = st->integer >> 8; 276 st->buf[0] = st->integer >> 8;
277 st->buf[1] = 0x40; /* REG12 default */ 277 st->buf[1] = 0x40; /* REG12 default */
278 st->buf[2] = 0x00; 278 st->buf[2] = 0x00;
279 st->buf[3] = st->fract2 & 0xFF; 279 st->buf[3] = st->fract1 & 0xFF;
280 st->buf[4] = st->fract2 >> 7; 280 st->buf[4] = st->fract1 >> 8;
281 st->buf[5] = st->fract2 >> 15; 281 st->buf[5] = st->fract1 >> 16;
282 st->buf[6] = ADF4371_FRAC2WORD_L(st->fract2 & 0x7F) | 282 st->buf[6] = ADF4371_FRAC2WORD_L(st->fract2 & 0x7F) |
283 ADF4371_FRAC1WORD(st->fract1 >> 23); 283 ADF4371_FRAC1WORD(st->fract1 >> 24);
284 st->buf[7] = ADF4371_FRAC2WORD_H(st->fract2 >> 7); 284 st->buf[7] = ADF4371_FRAC2WORD_H(st->fract2 >> 7);
285 st->buf[8] = st->mod2 & 0xFF; 285 st->buf[8] = st->mod2 & 0xFF;
286 st->buf[9] = ADF4371_MOD2WORD(st->mod2 >> 8); 286 st->buf[9] = ADF4371_MOD2WORD(st->mod2 >> 8);
diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c b/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c
index 53a59957cc54..8a704cd5bddb 100644
--- a/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c
+++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c
@@ -845,6 +845,25 @@ static const struct iio_chan_spec inv_mpu_channels[] = {
845 INV_MPU6050_CHAN(IIO_ACCEL, IIO_MOD_Z, INV_MPU6050_SCAN_ACCL_Z), 845 INV_MPU6050_CHAN(IIO_ACCEL, IIO_MOD_Z, INV_MPU6050_SCAN_ACCL_Z),
846}; 846};
847 847
848static const unsigned long inv_mpu_scan_masks[] = {
849 /* 3-axis accel */
850 BIT(INV_MPU6050_SCAN_ACCL_X)
851 | BIT(INV_MPU6050_SCAN_ACCL_Y)
852 | BIT(INV_MPU6050_SCAN_ACCL_Z),
853 /* 3-axis gyro */
854 BIT(INV_MPU6050_SCAN_GYRO_X)
855 | BIT(INV_MPU6050_SCAN_GYRO_Y)
856 | BIT(INV_MPU6050_SCAN_GYRO_Z),
857 /* 6-axis accel + gyro */
858 BIT(INV_MPU6050_SCAN_ACCL_X)
859 | BIT(INV_MPU6050_SCAN_ACCL_Y)
860 | BIT(INV_MPU6050_SCAN_ACCL_Z)
861 | BIT(INV_MPU6050_SCAN_GYRO_X)
862 | BIT(INV_MPU6050_SCAN_GYRO_Y)
863 | BIT(INV_MPU6050_SCAN_GYRO_Z),
864 0,
865};
866
848static const struct iio_chan_spec inv_icm20602_channels[] = { 867static const struct iio_chan_spec inv_icm20602_channels[] = {
849 IIO_CHAN_SOFT_TIMESTAMP(INV_ICM20602_SCAN_TIMESTAMP), 868 IIO_CHAN_SOFT_TIMESTAMP(INV_ICM20602_SCAN_TIMESTAMP),
850 { 869 {
@@ -871,6 +890,28 @@ static const struct iio_chan_spec inv_icm20602_channels[] = {
871 INV_MPU6050_CHAN(IIO_ACCEL, IIO_MOD_Z, INV_ICM20602_SCAN_ACCL_Z), 890 INV_MPU6050_CHAN(IIO_ACCEL, IIO_MOD_Z, INV_ICM20602_SCAN_ACCL_Z),
872}; 891};
873 892
893static const unsigned long inv_icm20602_scan_masks[] = {
894 /* 3-axis accel + temp (mandatory) */
895 BIT(INV_ICM20602_SCAN_ACCL_X)
896 | BIT(INV_ICM20602_SCAN_ACCL_Y)
897 | BIT(INV_ICM20602_SCAN_ACCL_Z)
898 | BIT(INV_ICM20602_SCAN_TEMP),
899 /* 3-axis gyro + temp (mandatory) */
900 BIT(INV_ICM20602_SCAN_GYRO_X)
901 | BIT(INV_ICM20602_SCAN_GYRO_Y)
902 | BIT(INV_ICM20602_SCAN_GYRO_Z)
903 | BIT(INV_ICM20602_SCAN_TEMP),
904 /* 6-axis accel + gyro + temp (mandatory) */
905 BIT(INV_ICM20602_SCAN_ACCL_X)
906 | BIT(INV_ICM20602_SCAN_ACCL_Y)
907 | BIT(INV_ICM20602_SCAN_ACCL_Z)
908 | BIT(INV_ICM20602_SCAN_GYRO_X)
909 | BIT(INV_ICM20602_SCAN_GYRO_Y)
910 | BIT(INV_ICM20602_SCAN_GYRO_Z)
911 | BIT(INV_ICM20602_SCAN_TEMP),
912 0,
913};
914
874/* 915/*
875 * The user can choose any frequency between INV_MPU6050_MIN_FIFO_RATE and 916 * The user can choose any frequency between INV_MPU6050_MIN_FIFO_RATE and
876 * INV_MPU6050_MAX_FIFO_RATE, but only these frequencies are matched by the 917 * INV_MPU6050_MAX_FIFO_RATE, but only these frequencies are matched by the
@@ -1130,9 +1171,11 @@ int inv_mpu_core_probe(struct regmap *regmap, int irq, const char *name,
1130 if (chip_type == INV_ICM20602) { 1171 if (chip_type == INV_ICM20602) {
1131 indio_dev->channels = inv_icm20602_channels; 1172 indio_dev->channels = inv_icm20602_channels;
1132 indio_dev->num_channels = ARRAY_SIZE(inv_icm20602_channels); 1173 indio_dev->num_channels = ARRAY_SIZE(inv_icm20602_channels);
1174 indio_dev->available_scan_masks = inv_icm20602_scan_masks;
1133 } else { 1175 } else {
1134 indio_dev->channels = inv_mpu_channels; 1176 indio_dev->channels = inv_mpu_channels;
1135 indio_dev->num_channels = ARRAY_SIZE(inv_mpu_channels); 1177 indio_dev->num_channels = ARRAY_SIZE(inv_mpu_channels);
1178 indio_dev->available_scan_masks = inv_mpu_scan_masks;
1136 } 1179 }
1137 1180
1138 indio_dev->info = &mpu_info; 1181 indio_dev->info = &mpu_info;
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
index 19f1730a4f24..a68d0ccf67a4 100644
--- a/drivers/infiniband/core/cma.c
+++ b/drivers/infiniband/core/cma.c
@@ -4724,10 +4724,14 @@ static int __init cma_init(void)
4724 if (ret) 4724 if (ret)
4725 goto err; 4725 goto err;
4726 4726
4727 cma_configfs_init(); 4727 ret = cma_configfs_init();
4728 if (ret)
4729 goto err_ib;
4728 4730
4729 return 0; 4731 return 0;
4730 4732
4733err_ib:
4734 ib_unregister_client(&cma_client);
4731err: 4735err:
4732 unregister_netdevice_notifier(&cma_nb); 4736 unregister_netdevice_notifier(&cma_nb);
4733 ib_sa_unregister_client(&sa_client); 4737 ib_sa_unregister_client(&sa_client);
diff --git a/drivers/infiniband/core/core_priv.h b/drivers/infiniband/core/core_priv.h
index 888d89ce81df..beee7b7e0d9a 100644
--- a/drivers/infiniband/core/core_priv.h
+++ b/drivers/infiniband/core/core_priv.h
@@ -302,7 +302,9 @@ static inline struct ib_qp *_ib_create_qp(struct ib_device *dev,
302 struct ib_udata *udata, 302 struct ib_udata *udata,
303 struct ib_uobject *uobj) 303 struct ib_uobject *uobj)
304{ 304{
305 enum ib_qp_type qp_type = attr->qp_type;
305 struct ib_qp *qp; 306 struct ib_qp *qp;
307 bool is_xrc;
306 308
307 if (!dev->ops.create_qp) 309 if (!dev->ops.create_qp)
308 return ERR_PTR(-EOPNOTSUPP); 310 return ERR_PTR(-EOPNOTSUPP);
@@ -320,7 +322,8 @@ static inline struct ib_qp *_ib_create_qp(struct ib_device *dev,
320 * and more importantly they are created internaly by driver, 322 * and more importantly they are created internaly by driver,
321 * see mlx5 create_dev_resources() as an example. 323 * see mlx5 create_dev_resources() as an example.
322 */ 324 */
323 if (attr->qp_type < IB_QPT_XRC_INI) { 325 is_xrc = qp_type == IB_QPT_XRC_INI || qp_type == IB_QPT_XRC_TGT;
326 if ((qp_type < IB_QPT_MAX && !is_xrc) || qp_type == IB_QPT_DRIVER) {
324 qp->res.type = RDMA_RESTRACK_QP; 327 qp->res.type = RDMA_RESTRACK_QP;
325 if (uobj) 328 if (uobj)
326 rdma_restrack_uadd(&qp->res); 329 rdma_restrack_uadd(&qp->res);
diff --git a/drivers/infiniband/core/counters.c b/drivers/infiniband/core/counters.c
index 01faef7bc061..af8c85d18e62 100644
--- a/drivers/infiniband/core/counters.c
+++ b/drivers/infiniband/core/counters.c
@@ -38,6 +38,9 @@ int rdma_counter_set_auto_mode(struct ib_device *dev, u8 port,
38 int ret; 38 int ret;
39 39
40 port_counter = &dev->port_data[port].port_counter; 40 port_counter = &dev->port_data[port].port_counter;
41 if (!port_counter->hstats)
42 return -EOPNOTSUPP;
43
41 mutex_lock(&port_counter->lock); 44 mutex_lock(&port_counter->lock);
42 if (on) { 45 if (on) {
43 ret = __counter_set_mode(&port_counter->mode, 46 ret = __counter_set_mode(&port_counter->mode,
@@ -146,13 +149,11 @@ static bool auto_mode_match(struct ib_qp *qp, struct rdma_counter *counter,
146 struct auto_mode_param *param = &counter->mode.param; 149 struct auto_mode_param *param = &counter->mode.param;
147 bool match = true; 150 bool match = true;
148 151
149 if (rdma_is_kernel_res(&counter->res) != rdma_is_kernel_res(&qp->res)) 152 if (!rdma_is_visible_in_pid_ns(&qp->res))
150 return false; 153 return false;
151 154
152 /* Ensure that counter belong to right PID */ 155 /* Ensure that counter belongs to the right PID */
153 if (!rdma_is_kernel_res(&counter->res) && 156 if (task_pid_nr(counter->res.task) != task_pid_nr(qp->res.task))
154 !rdma_is_kernel_res(&qp->res) &&
155 (task_pid_vnr(counter->res.task) != current->pid))
156 return false; 157 return false;
157 158
158 if (auto_mask & RDMA_COUNTER_MASK_QP_TYPE) 159 if (auto_mask & RDMA_COUNTER_MASK_QP_TYPE)
@@ -393,6 +394,9 @@ u64 rdma_counter_get_hwstat_value(struct ib_device *dev, u8 port, u32 index)
393 u64 sum; 394 u64 sum;
394 395
395 port_counter = &dev->port_data[port].port_counter; 396 port_counter = &dev->port_data[port].port_counter;
397 if (!port_counter->hstats)
398 return 0;
399
396 sum = get_running_counters_hwstat_sum(dev, port, index); 400 sum = get_running_counters_hwstat_sum(dev, port, index);
397 sum += port_counter->hstats->value[index]; 401 sum += port_counter->hstats->value[index];
398 402
@@ -418,7 +422,7 @@ static struct ib_qp *rdma_counter_get_qp(struct ib_device *dev, u32 qp_num)
418 return qp; 422 return qp;
419 423
420err: 424err:
421 rdma_restrack_put(&qp->res); 425 rdma_restrack_put(res);
422 return NULL; 426 return NULL;
423} 427}
424 428
@@ -506,6 +510,9 @@ int rdma_counter_bind_qpn_alloc(struct ib_device *dev, u8 port,
506 if (!rdma_is_port_valid(dev, port)) 510 if (!rdma_is_port_valid(dev, port))
507 return -EINVAL; 511 return -EINVAL;
508 512
513 if (!dev->port_data[port].port_counter.hstats)
514 return -EOPNOTSUPP;
515
509 qp = rdma_counter_get_qp(dev, qp_num); 516 qp = rdma_counter_get_qp(dev, qp_num);
510 if (!qp) 517 if (!qp)
511 return -ENOENT; 518 return -ENOENT;
@@ -594,7 +601,7 @@ void rdma_counter_init(struct ib_device *dev)
594 struct rdma_port_counter *port_counter; 601 struct rdma_port_counter *port_counter;
595 u32 port; 602 u32 port;
596 603
597 if (!dev->ops.alloc_hw_stats || !dev->port_data) 604 if (!dev->port_data)
598 return; 605 return;
599 606
600 rdma_for_each_port(dev, port) { 607 rdma_for_each_port(dev, port) {
@@ -602,6 +609,9 @@ void rdma_counter_init(struct ib_device *dev)
602 port_counter->mode.mode = RDMA_COUNTER_MODE_NONE; 609 port_counter->mode.mode = RDMA_COUNTER_MODE_NONE;
603 mutex_init(&port_counter->lock); 610 mutex_init(&port_counter->lock);
604 611
612 if (!dev->ops.alloc_hw_stats)
613 continue;
614
605 port_counter->hstats = dev->ops.alloc_hw_stats(dev, port); 615 port_counter->hstats = dev->ops.alloc_hw_stats(dev, port);
606 if (!port_counter->hstats) 616 if (!port_counter->hstats)
607 goto fail; 617 goto fail;
@@ -624,9 +634,6 @@ void rdma_counter_release(struct ib_device *dev)
624 struct rdma_port_counter *port_counter; 634 struct rdma_port_counter *port_counter;
625 u32 port; 635 u32 port;
626 636
627 if (!dev->ops.alloc_hw_stats)
628 return;
629
630 rdma_for_each_port(dev, port) { 637 rdma_for_each_port(dev, port) {
631 port_counter = &dev->port_data[port].port_counter; 638 port_counter = &dev->port_data[port].port_counter;
632 kfree(port_counter->hstats); 639 kfree(port_counter->hstats);
diff --git a/drivers/infiniband/core/device.c b/drivers/infiniband/core/device.c
index 9773145dee09..ea8661a00651 100644
--- a/drivers/infiniband/core/device.c
+++ b/drivers/infiniband/core/device.c
@@ -94,11 +94,17 @@ static DEFINE_XARRAY_FLAGS(devices, XA_FLAGS_ALLOC);
94static DECLARE_RWSEM(devices_rwsem); 94static DECLARE_RWSEM(devices_rwsem);
95#define DEVICE_REGISTERED XA_MARK_1 95#define DEVICE_REGISTERED XA_MARK_1
96 96
97static LIST_HEAD(client_list); 97static u32 highest_client_id;
98#define CLIENT_REGISTERED XA_MARK_1 98#define CLIENT_REGISTERED XA_MARK_1
99static DEFINE_XARRAY_FLAGS(clients, XA_FLAGS_ALLOC); 99static DEFINE_XARRAY_FLAGS(clients, XA_FLAGS_ALLOC);
100static DECLARE_RWSEM(clients_rwsem); 100static DECLARE_RWSEM(clients_rwsem);
101 101
102static void ib_client_put(struct ib_client *client)
103{
104 if (refcount_dec_and_test(&client->uses))
105 complete(&client->uses_zero);
106}
107
102/* 108/*
103 * If client_data is registered then the corresponding client must also still 109 * If client_data is registered then the corresponding client must also still
104 * be registered. 110 * be registered.
@@ -661,6 +667,14 @@ static int add_client_context(struct ib_device *device,
661 667
662 down_write(&device->client_data_rwsem); 668 down_write(&device->client_data_rwsem);
663 /* 669 /*
670 * So long as the client is registered hold both the client and device
671 * unregistration locks.
672 */
673 if (!refcount_inc_not_zero(&client->uses))
674 goto out_unlock;
675 refcount_inc(&device->refcount);
676
677 /*
664 * Another caller to add_client_context got here first and has already 678 * Another caller to add_client_context got here first and has already
665 * completely initialized context. 679 * completely initialized context.
666 */ 680 */
@@ -683,6 +697,9 @@ static int add_client_context(struct ib_device *device,
683 return 0; 697 return 0;
684 698
685out: 699out:
700 ib_device_put(device);
701 ib_client_put(client);
702out_unlock:
686 up_write(&device->client_data_rwsem); 703 up_write(&device->client_data_rwsem);
687 return ret; 704 return ret;
688} 705}
@@ -702,7 +719,7 @@ static void remove_client_context(struct ib_device *device,
702 client_data = xa_load(&device->client_data, client_id); 719 client_data = xa_load(&device->client_data, client_id);
703 xa_clear_mark(&device->client_data, client_id, CLIENT_DATA_REGISTERED); 720 xa_clear_mark(&device->client_data, client_id, CLIENT_DATA_REGISTERED);
704 client = xa_load(&clients, client_id); 721 client = xa_load(&clients, client_id);
705 downgrade_write(&device->client_data_rwsem); 722 up_write(&device->client_data_rwsem);
706 723
707 /* 724 /*
708 * Notice we cannot be holding any exclusive locks when calling the 725 * Notice we cannot be holding any exclusive locks when calling the
@@ -712,17 +729,13 @@ static void remove_client_context(struct ib_device *device,
712 * 729 *
713 * For this reason clients and drivers should not call the 730 * For this reason clients and drivers should not call the
714 * unregistration functions will holdling any locks. 731 * unregistration functions will holdling any locks.
715 *
716 * It tempting to drop the client_data_rwsem too, but this is required
717 * to ensure that unregister_client does not return until all clients
718 * are completely unregistered, which is required to avoid module
719 * unloading races.
720 */ 732 */
721 if (client->remove) 733 if (client->remove)
722 client->remove(device, client_data); 734 client->remove(device, client_data);
723 735
724 xa_erase(&device->client_data, client_id); 736 xa_erase(&device->client_data, client_id);
725 up_read(&device->client_data_rwsem); 737 ib_device_put(device);
738 ib_client_put(client);
726} 739}
727 740
728static int alloc_port_data(struct ib_device *device) 741static int alloc_port_data(struct ib_device *device)
@@ -1224,7 +1237,7 @@ static int setup_device(struct ib_device *device)
1224 1237
1225static void disable_device(struct ib_device *device) 1238static void disable_device(struct ib_device *device)
1226{ 1239{
1227 struct ib_client *client; 1240 u32 cid;
1228 1241
1229 WARN_ON(!refcount_read(&device->refcount)); 1242 WARN_ON(!refcount_read(&device->refcount));
1230 1243
@@ -1232,10 +1245,19 @@ static void disable_device(struct ib_device *device)
1232 xa_clear_mark(&devices, device->index, DEVICE_REGISTERED); 1245 xa_clear_mark(&devices, device->index, DEVICE_REGISTERED);
1233 up_write(&devices_rwsem); 1246 up_write(&devices_rwsem);
1234 1247
1248 /*
1249 * Remove clients in LIFO order, see assign_client_id. This could be
1250 * more efficient if xarray learns to reverse iterate. Since no new
1251 * clients can be added to this ib_device past this point we only need
1252 * the maximum possible client_id value here.
1253 */
1235 down_read(&clients_rwsem); 1254 down_read(&clients_rwsem);
1236 list_for_each_entry_reverse(client, &client_list, list) 1255 cid = highest_client_id;
1237 remove_client_context(device, client->client_id);
1238 up_read(&clients_rwsem); 1256 up_read(&clients_rwsem);
1257 while (cid) {
1258 cid--;
1259 remove_client_context(device, cid);
1260 }
1239 1261
1240 /* Pairs with refcount_set in enable_device */ 1262 /* Pairs with refcount_set in enable_device */
1241 ib_device_put(device); 1263 ib_device_put(device);
@@ -1662,30 +1684,31 @@ static int assign_client_id(struct ib_client *client)
1662 /* 1684 /*
1663 * The add/remove callbacks must be called in FIFO/LIFO order. To 1685 * The add/remove callbacks must be called in FIFO/LIFO order. To
1664 * achieve this we assign client_ids so they are sorted in 1686 * achieve this we assign client_ids so they are sorted in
1665 * registration order, and retain a linked list we can reverse iterate 1687 * registration order.
1666 * to get the LIFO order. The extra linked list can go away if xarray
1667 * learns to reverse iterate.
1668 */ 1688 */
1669 if (list_empty(&client_list)) { 1689 client->client_id = highest_client_id;
1670 client->client_id = 0;
1671 } else {
1672 struct ib_client *last;
1673
1674 last = list_last_entry(&client_list, struct ib_client, list);
1675 client->client_id = last->client_id + 1;
1676 }
1677 ret = xa_insert(&clients, client->client_id, client, GFP_KERNEL); 1690 ret = xa_insert(&clients, client->client_id, client, GFP_KERNEL);
1678 if (ret) 1691 if (ret)
1679 goto out; 1692 goto out;
1680 1693
1694 highest_client_id++;
1681 xa_set_mark(&clients, client->client_id, CLIENT_REGISTERED); 1695 xa_set_mark(&clients, client->client_id, CLIENT_REGISTERED);
1682 list_add_tail(&client->list, &client_list);
1683 1696
1684out: 1697out:
1685 up_write(&clients_rwsem); 1698 up_write(&clients_rwsem);
1686 return ret; 1699 return ret;
1687} 1700}
1688 1701
1702static void remove_client_id(struct ib_client *client)
1703{
1704 down_write(&clients_rwsem);
1705 xa_erase(&clients, client->client_id);
1706 for (; highest_client_id; highest_client_id--)
1707 if (xa_load(&clients, highest_client_id - 1))
1708 break;
1709 up_write(&clients_rwsem);
1710}
1711
1689/** 1712/**
1690 * ib_register_client - Register an IB client 1713 * ib_register_client - Register an IB client
1691 * @client:Client to register 1714 * @client:Client to register
@@ -1705,6 +1728,8 @@ int ib_register_client(struct ib_client *client)
1705 unsigned long index; 1728 unsigned long index;
1706 int ret; 1729 int ret;
1707 1730
1731 refcount_set(&client->uses, 1);
1732 init_completion(&client->uses_zero);
1708 ret = assign_client_id(client); 1733 ret = assign_client_id(client);
1709 if (ret) 1734 if (ret)
1710 return ret; 1735 return ret;
@@ -1740,21 +1765,30 @@ void ib_unregister_client(struct ib_client *client)
1740 unsigned long index; 1765 unsigned long index;
1741 1766
1742 down_write(&clients_rwsem); 1767 down_write(&clients_rwsem);
1768 ib_client_put(client);
1743 xa_clear_mark(&clients, client->client_id, CLIENT_REGISTERED); 1769 xa_clear_mark(&clients, client->client_id, CLIENT_REGISTERED);
1744 up_write(&clients_rwsem); 1770 up_write(&clients_rwsem);
1745 /* 1771
1746 * Every device still known must be serialized to make sure we are 1772 /* We do not want to have locks while calling client->remove() */
1747 * done with the client callbacks before we return. 1773 rcu_read_lock();
1748 */ 1774 xa_for_each (&devices, index, device) {
1749 down_read(&devices_rwsem); 1775 if (!ib_device_try_get(device))
1750 xa_for_each (&devices, index, device) 1776 continue;
1777 rcu_read_unlock();
1778
1751 remove_client_context(device, client->client_id); 1779 remove_client_context(device, client->client_id);
1752 up_read(&devices_rwsem);
1753 1780
1754 down_write(&clients_rwsem); 1781 ib_device_put(device);
1755 list_del(&client->list); 1782 rcu_read_lock();
1756 xa_erase(&clients, client->client_id); 1783 }
1757 up_write(&clients_rwsem); 1784 rcu_read_unlock();
1785
1786 /*
1787 * remove_client_context() is not a fence, it can return even though a
1788 * removal is ongoing. Wait until all removals are completed.
1789 */
1790 wait_for_completion(&client->uses_zero);
1791 remove_client_id(client);
1758} 1792}
1759EXPORT_SYMBOL(ib_unregister_client); 1793EXPORT_SYMBOL(ib_unregister_client);
1760 1794
diff --git a/drivers/infiniband/core/mad.c b/drivers/infiniband/core/mad.c
index cc99479b2c09..9947d16edef2 100644
--- a/drivers/infiniband/core/mad.c
+++ b/drivers/infiniband/core/mad.c
@@ -3224,18 +3224,18 @@ static int ib_mad_port_open(struct ib_device *device,
3224 if (has_smi) 3224 if (has_smi)
3225 cq_size *= 2; 3225 cq_size *= 2;
3226 3226
3227 port_priv->pd = ib_alloc_pd(device, 0);
3228 if (IS_ERR(port_priv->pd)) {
3229 dev_err(&device->dev, "Couldn't create ib_mad PD\n");
3230 ret = PTR_ERR(port_priv->pd);
3231 goto error3;
3232 }
3233
3227 port_priv->cq = ib_alloc_cq(port_priv->device, port_priv, cq_size, 0, 3234 port_priv->cq = ib_alloc_cq(port_priv->device, port_priv, cq_size, 0,
3228 IB_POLL_UNBOUND_WORKQUEUE); 3235 IB_POLL_UNBOUND_WORKQUEUE);
3229 if (IS_ERR(port_priv->cq)) { 3236 if (IS_ERR(port_priv->cq)) {
3230 dev_err(&device->dev, "Couldn't create ib_mad CQ\n"); 3237 dev_err(&device->dev, "Couldn't create ib_mad CQ\n");
3231 ret = PTR_ERR(port_priv->cq); 3238 ret = PTR_ERR(port_priv->cq);
3232 goto error3;
3233 }
3234
3235 port_priv->pd = ib_alloc_pd(device, 0);
3236 if (IS_ERR(port_priv->pd)) {
3237 dev_err(&device->dev, "Couldn't create ib_mad PD\n");
3238 ret = PTR_ERR(port_priv->pd);
3239 goto error4; 3239 goto error4;
3240 } 3240 }
3241 3241
@@ -3278,11 +3278,11 @@ error8:
3278error7: 3278error7:
3279 destroy_mad_qp(&port_priv->qp_info[0]); 3279 destroy_mad_qp(&port_priv->qp_info[0]);
3280error6: 3280error6:
3281 ib_dealloc_pd(port_priv->pd);
3282error4:
3283 ib_free_cq(port_priv->cq); 3281 ib_free_cq(port_priv->cq);
3284 cleanup_recv_queue(&port_priv->qp_info[1]); 3282 cleanup_recv_queue(&port_priv->qp_info[1]);
3285 cleanup_recv_queue(&port_priv->qp_info[0]); 3283 cleanup_recv_queue(&port_priv->qp_info[0]);
3284error4:
3285 ib_dealloc_pd(port_priv->pd);
3286error3: 3286error3:
3287 kfree(port_priv); 3287 kfree(port_priv);
3288 3288
@@ -3312,8 +3312,8 @@ static int ib_mad_port_close(struct ib_device *device, int port_num)
3312 destroy_workqueue(port_priv->wq); 3312 destroy_workqueue(port_priv->wq);
3313 destroy_mad_qp(&port_priv->qp_info[1]); 3313 destroy_mad_qp(&port_priv->qp_info[1]);
3314 destroy_mad_qp(&port_priv->qp_info[0]); 3314 destroy_mad_qp(&port_priv->qp_info[0]);
3315 ib_dealloc_pd(port_priv->pd);
3316 ib_free_cq(port_priv->cq); 3315 ib_free_cq(port_priv->cq);
3316 ib_dealloc_pd(port_priv->pd);
3317 cleanup_recv_queue(&port_priv->qp_info[1]); 3317 cleanup_recv_queue(&port_priv->qp_info[1]);
3318 cleanup_recv_queue(&port_priv->qp_info[0]); 3318 cleanup_recv_queue(&port_priv->qp_info[0]);
3319 /* XXX: Handle deallocation of MAD registration tables */ 3319 /* XXX: Handle deallocation of MAD registration tables */
diff --git a/drivers/infiniband/core/nldev.c b/drivers/infiniband/core/nldev.c
index 783e465e7c41..020c26976558 100644
--- a/drivers/infiniband/core/nldev.c
+++ b/drivers/infiniband/core/nldev.c
@@ -382,8 +382,7 @@ static int fill_res_info(struct sk_buff *msg, struct ib_device *device)
382 for (i = 0; i < RDMA_RESTRACK_MAX; i++) { 382 for (i = 0; i < RDMA_RESTRACK_MAX; i++) {
383 if (!names[i]) 383 if (!names[i])
384 continue; 384 continue;
385 curr = rdma_restrack_count(device, i, 385 curr = rdma_restrack_count(device, i);
386 task_active_pid_ns(current));
387 ret = fill_res_info_entry(msg, names[i], curr); 386 ret = fill_res_info_entry(msg, names[i], curr);
388 if (ret) 387 if (ret)
389 goto err; 388 goto err;
@@ -1952,12 +1951,16 @@ static int stat_get_doit_qp(struct sk_buff *skb, struct nlmsghdr *nlh,
1952 1951
1953 if (fill_nldev_handle(msg, device) || 1952 if (fill_nldev_handle(msg, device) ||
1954 nla_put_u32(msg, RDMA_NLDEV_ATTR_PORT_INDEX, port) || 1953 nla_put_u32(msg, RDMA_NLDEV_ATTR_PORT_INDEX, port) ||
1955 nla_put_u32(msg, RDMA_NLDEV_ATTR_STAT_MODE, mode)) 1954 nla_put_u32(msg, RDMA_NLDEV_ATTR_STAT_MODE, mode)) {
1955 ret = -EMSGSIZE;
1956 goto err_msg; 1956 goto err_msg;
1957 }
1957 1958
1958 if ((mode == RDMA_COUNTER_MODE_AUTO) && 1959 if ((mode == RDMA_COUNTER_MODE_AUTO) &&
1959 nla_put_u32(msg, RDMA_NLDEV_ATTR_STAT_AUTO_MODE_MASK, mask)) 1960 nla_put_u32(msg, RDMA_NLDEV_ATTR_STAT_AUTO_MODE_MASK, mask)) {
1961 ret = -EMSGSIZE;
1960 goto err_msg; 1962 goto err_msg;
1963 }
1961 1964
1962 nlmsg_end(msg, nlh); 1965 nlmsg_end(msg, nlh);
1963 ib_device_put(device); 1966 ib_device_put(device);
diff --git a/drivers/infiniband/core/restrack.c b/drivers/infiniband/core/restrack.c
index bddff426ee0f..a07665f7ef8c 100644
--- a/drivers/infiniband/core/restrack.c
+++ b/drivers/infiniband/core/restrack.c
@@ -107,10 +107,8 @@ void rdma_restrack_clean(struct ib_device *dev)
107 * rdma_restrack_count() - the current usage of specific object 107 * rdma_restrack_count() - the current usage of specific object
108 * @dev: IB device 108 * @dev: IB device
109 * @type: actual type of object to operate 109 * @type: actual type of object to operate
110 * @ns: PID namespace
111 */ 110 */
112int rdma_restrack_count(struct ib_device *dev, enum rdma_restrack_type type, 111int rdma_restrack_count(struct ib_device *dev, enum rdma_restrack_type type)
113 struct pid_namespace *ns)
114{ 112{
115 struct rdma_restrack_root *rt = &dev->res[type]; 113 struct rdma_restrack_root *rt = &dev->res[type];
116 struct rdma_restrack_entry *e; 114 struct rdma_restrack_entry *e;
@@ -119,10 +117,9 @@ int rdma_restrack_count(struct ib_device *dev, enum rdma_restrack_type type,
119 117
120 xa_lock(&rt->xa); 118 xa_lock(&rt->xa);
121 xas_for_each(&xas, e, U32_MAX) { 119 xas_for_each(&xas, e, U32_MAX) {
122 if (ns == &init_pid_ns || 120 if (!rdma_is_visible_in_pid_ns(e))
123 (!rdma_is_kernel_res(e) && 121 continue;
124 ns == task_active_pid_ns(e->task))) 122 cnt++;
125 cnt++;
126 } 123 }
127 xa_unlock(&rt->xa); 124 xa_unlock(&rt->xa);
128 return cnt; 125 return cnt;
@@ -360,5 +357,7 @@ bool rdma_is_visible_in_pid_ns(struct rdma_restrack_entry *res)
360 */ 357 */
361 if (rdma_is_kernel_res(res)) 358 if (rdma_is_kernel_res(res))
362 return task_active_pid_ns(current) == &init_pid_ns; 359 return task_active_pid_ns(current) == &init_pid_ns;
363 return task_active_pid_ns(current) == task_active_pid_ns(res->task); 360
361 /* PID 0 means that resource is not found in current namespace */
362 return task_pid_vnr(res->task);
364} 363}
diff --git a/drivers/infiniband/core/umem.c b/drivers/infiniband/core/umem.c
index 08da840ed7ee..56553668256f 100644
--- a/drivers/infiniband/core/umem.c
+++ b/drivers/infiniband/core/umem.c
@@ -379,14 +379,9 @@ EXPORT_SYMBOL(ib_umem_release);
379 379
380int ib_umem_page_count(struct ib_umem *umem) 380int ib_umem_page_count(struct ib_umem *umem)
381{ 381{
382 int i; 382 int i, n = 0;
383 int n;
384 struct scatterlist *sg; 383 struct scatterlist *sg;
385 384
386 if (umem->is_odp)
387 return ib_umem_num_pages(umem);
388
389 n = 0;
390 for_each_sg(umem->sg_head.sgl, sg, umem->nmap, i) 385 for_each_sg(umem->sg_head.sgl, sg, umem->nmap, i)
391 n += sg_dma_len(sg) >> PAGE_SHIFT; 386 n += sg_dma_len(sg) >> PAGE_SHIFT;
392 387
diff --git a/drivers/infiniband/core/umem_odp.c b/drivers/infiniband/core/umem_odp.c
index 2a75c6f8d827..c0e15db34680 100644
--- a/drivers/infiniband/core/umem_odp.c
+++ b/drivers/infiniband/core/umem_odp.c
@@ -112,10 +112,6 @@ static int ib_umem_notifier_release_trampoline(struct ib_umem_odp *umem_odp,
112 * prevent any further fault handling on this MR. 112 * prevent any further fault handling on this MR.
113 */ 113 */
114 ib_umem_notifier_start_account(umem_odp); 114 ib_umem_notifier_start_account(umem_odp);
115 umem_odp->dying = 1;
116 /* Make sure that the fact the umem is dying is out before we release
117 * all pending page faults. */
118 smp_wmb();
119 complete_all(&umem_odp->notifier_completion); 115 complete_all(&umem_odp->notifier_completion);
120 umem_odp->umem.context->invalidate_range( 116 umem_odp->umem.context->invalidate_range(
121 umem_odp, ib_umem_start(umem_odp), ib_umem_end(umem_odp)); 117 umem_odp, ib_umem_start(umem_odp), ib_umem_end(umem_odp));
diff --git a/drivers/infiniband/core/user_mad.c b/drivers/infiniband/core/user_mad.c
index 9f8a48016b41..ffdeaf6e0b68 100644
--- a/drivers/infiniband/core/user_mad.c
+++ b/drivers/infiniband/core/user_mad.c
@@ -49,6 +49,7 @@
49#include <linux/sched.h> 49#include <linux/sched.h>
50#include <linux/semaphore.h> 50#include <linux/semaphore.h>
51#include <linux/slab.h> 51#include <linux/slab.h>
52#include <linux/nospec.h>
52 53
53#include <linux/uaccess.h> 54#include <linux/uaccess.h>
54 55
@@ -884,11 +885,14 @@ static int ib_umad_unreg_agent(struct ib_umad_file *file, u32 __user *arg)
884 885
885 if (get_user(id, arg)) 886 if (get_user(id, arg))
886 return -EFAULT; 887 return -EFAULT;
888 if (id >= IB_UMAD_MAX_AGENTS)
889 return -EINVAL;
887 890
888 mutex_lock(&file->port->file_mutex); 891 mutex_lock(&file->port->file_mutex);
889 mutex_lock(&file->mutex); 892 mutex_lock(&file->mutex);
890 893
891 if (id >= IB_UMAD_MAX_AGENTS || !__get_agent(file, id)) { 894 id = array_index_nospec(id, IB_UMAD_MAX_AGENTS);
895 if (!__get_agent(file, id)) {
892 ret = -EINVAL; 896 ret = -EINVAL;
893 goto out; 897 goto out;
894 } 898 }
diff --git a/drivers/infiniband/hw/bnxt_re/ib_verbs.c b/drivers/infiniband/hw/bnxt_re/ib_verbs.c
index a91653aabf38..098ab883733e 100644
--- a/drivers/infiniband/hw/bnxt_re/ib_verbs.c
+++ b/drivers/infiniband/hw/bnxt_re/ib_verbs.c
@@ -308,6 +308,7 @@ int bnxt_re_del_gid(const struct ib_gid_attr *attr, void **context)
308 struct bnxt_re_dev *rdev = to_bnxt_re_dev(attr->device, ibdev); 308 struct bnxt_re_dev *rdev = to_bnxt_re_dev(attr->device, ibdev);
309 struct bnxt_qplib_sgid_tbl *sgid_tbl = &rdev->qplib_res.sgid_tbl; 309 struct bnxt_qplib_sgid_tbl *sgid_tbl = &rdev->qplib_res.sgid_tbl;
310 struct bnxt_qplib_gid *gid_to_del; 310 struct bnxt_qplib_gid *gid_to_del;
311 u16 vlan_id = 0xFFFF;
311 312
312 /* Delete the entry from the hardware */ 313 /* Delete the entry from the hardware */
313 ctx = *context; 314 ctx = *context;
@@ -317,7 +318,8 @@ int bnxt_re_del_gid(const struct ib_gid_attr *attr, void **context)
317 if (sgid_tbl && sgid_tbl->active) { 318 if (sgid_tbl && sgid_tbl->active) {
318 if (ctx->idx >= sgid_tbl->max) 319 if (ctx->idx >= sgid_tbl->max)
319 return -EINVAL; 320 return -EINVAL;
320 gid_to_del = &sgid_tbl->tbl[ctx->idx]; 321 gid_to_del = &sgid_tbl->tbl[ctx->idx].gid;
322 vlan_id = sgid_tbl->tbl[ctx->idx].vlan_id;
321 /* DEL_GID is called in WQ context(netdevice_event_work_handler) 323 /* DEL_GID is called in WQ context(netdevice_event_work_handler)
322 * or via the ib_unregister_device path. In the former case QP1 324 * or via the ib_unregister_device path. In the former case QP1
323 * may not be destroyed yet, in which case just return as FW 325 * may not be destroyed yet, in which case just return as FW
@@ -335,7 +337,8 @@ int bnxt_re_del_gid(const struct ib_gid_attr *attr, void **context)
335 } 337 }
336 ctx->refcnt--; 338 ctx->refcnt--;
337 if (!ctx->refcnt) { 339 if (!ctx->refcnt) {
338 rc = bnxt_qplib_del_sgid(sgid_tbl, gid_to_del, true); 340 rc = bnxt_qplib_del_sgid(sgid_tbl, gid_to_del,
341 vlan_id, true);
339 if (rc) { 342 if (rc) {
340 dev_err(rdev_to_dev(rdev), 343 dev_err(rdev_to_dev(rdev),
341 "Failed to remove GID: %#x", rc); 344 "Failed to remove GID: %#x", rc);
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c
index 48b04d2f175f..60c8f76aab33 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c
+++ b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c
@@ -136,6 +136,13 @@ static int __send_message(struct bnxt_qplib_rcfw *rcfw, struct cmdq_base *req,
136 spin_unlock_irqrestore(&cmdq->lock, flags); 136 spin_unlock_irqrestore(&cmdq->lock, flags);
137 return -EBUSY; 137 return -EBUSY;
138 } 138 }
139
140 size = req->cmd_size;
141 /* change the cmd_size to the number of 16byte cmdq unit.
142 * req->cmd_size is modified here
143 */
144 bnxt_qplib_set_cmd_slots(req);
145
139 memset(resp, 0, sizeof(*resp)); 146 memset(resp, 0, sizeof(*resp));
140 crsqe->resp = (struct creq_qp_event *)resp; 147 crsqe->resp = (struct creq_qp_event *)resp;
141 crsqe->resp->cookie = req->cookie; 148 crsqe->resp->cookie = req->cookie;
@@ -150,7 +157,6 @@ static int __send_message(struct bnxt_qplib_rcfw *rcfw, struct cmdq_base *req,
150 157
151 cmdq_ptr = (struct bnxt_qplib_cmdqe **)cmdq->pbl_ptr; 158 cmdq_ptr = (struct bnxt_qplib_cmdqe **)cmdq->pbl_ptr;
152 preq = (u8 *)req; 159 preq = (u8 *)req;
153 size = req->cmd_size * BNXT_QPLIB_CMDQE_UNITS;
154 do { 160 do {
155 /* Locate the next cmdq slot */ 161 /* Locate the next cmdq slot */
156 sw_prod = HWQ_CMP(cmdq->prod, cmdq); 162 sw_prod = HWQ_CMP(cmdq->prod, cmdq);
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h
index 2138533bb642..dfeadc192e17 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h
+++ b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h
@@ -55,9 +55,7 @@
55 do { \ 55 do { \
56 memset(&(req), 0, sizeof((req))); \ 56 memset(&(req), 0, sizeof((req))); \
57 (req).opcode = CMDQ_BASE_OPCODE_##CMD; \ 57 (req).opcode = CMDQ_BASE_OPCODE_##CMD; \
58 (req).cmd_size = (sizeof((req)) + \ 58 (req).cmd_size = sizeof((req)); \
59 BNXT_QPLIB_CMDQE_UNITS - 1) / \
60 BNXT_QPLIB_CMDQE_UNITS; \
61 (req).flags = cpu_to_le16(cmd_flags); \ 59 (req).flags = cpu_to_le16(cmd_flags); \
62 } while (0) 60 } while (0)
63 61
@@ -95,6 +93,13 @@ static inline u32 bnxt_qplib_cmdqe_cnt_per_pg(u32 depth)
95 BNXT_QPLIB_CMDQE_UNITS); 93 BNXT_QPLIB_CMDQE_UNITS);
96} 94}
97 95
96/* Set the cmd_size to a factor of CMDQE unit */
97static inline void bnxt_qplib_set_cmd_slots(struct cmdq_base *req)
98{
99 req->cmd_size = (req->cmd_size + BNXT_QPLIB_CMDQE_UNITS - 1) /
100 BNXT_QPLIB_CMDQE_UNITS;
101}
102
98#define MAX_CMDQ_IDX(depth) ((depth) - 1) 103#define MAX_CMDQ_IDX(depth) ((depth) - 1)
99 104
100static inline u32 bnxt_qplib_max_cmdq_idx_per_pg(u32 depth) 105static inline u32 bnxt_qplib_max_cmdq_idx_per_pg(u32 depth)
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_res.c b/drivers/infiniband/hw/bnxt_re/qplib_res.c
index 37928b1111df..bdbde8e22420 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_res.c
+++ b/drivers/infiniband/hw/bnxt_re/qplib_res.c
@@ -488,7 +488,7 @@ static int bnxt_qplib_alloc_sgid_tbl(struct bnxt_qplib_res *res,
488 struct bnxt_qplib_sgid_tbl *sgid_tbl, 488 struct bnxt_qplib_sgid_tbl *sgid_tbl,
489 u16 max) 489 u16 max)
490{ 490{
491 sgid_tbl->tbl = kcalloc(max, sizeof(struct bnxt_qplib_gid), GFP_KERNEL); 491 sgid_tbl->tbl = kcalloc(max, sizeof(*sgid_tbl->tbl), GFP_KERNEL);
492 if (!sgid_tbl->tbl) 492 if (!sgid_tbl->tbl)
493 return -ENOMEM; 493 return -ENOMEM;
494 494
@@ -526,9 +526,10 @@ static void bnxt_qplib_cleanup_sgid_tbl(struct bnxt_qplib_res *res,
526 for (i = 0; i < sgid_tbl->max; i++) { 526 for (i = 0; i < sgid_tbl->max; i++) {
527 if (memcmp(&sgid_tbl->tbl[i], &bnxt_qplib_gid_zero, 527 if (memcmp(&sgid_tbl->tbl[i], &bnxt_qplib_gid_zero,
528 sizeof(bnxt_qplib_gid_zero))) 528 sizeof(bnxt_qplib_gid_zero)))
529 bnxt_qplib_del_sgid(sgid_tbl, &sgid_tbl->tbl[i], true); 529 bnxt_qplib_del_sgid(sgid_tbl, &sgid_tbl->tbl[i].gid,
530 sgid_tbl->tbl[i].vlan_id, true);
530 } 531 }
531 memset(sgid_tbl->tbl, 0, sizeof(struct bnxt_qplib_gid) * sgid_tbl->max); 532 memset(sgid_tbl->tbl, 0, sizeof(*sgid_tbl->tbl) * sgid_tbl->max);
532 memset(sgid_tbl->hw_id, -1, sizeof(u16) * sgid_tbl->max); 533 memset(sgid_tbl->hw_id, -1, sizeof(u16) * sgid_tbl->max);
533 memset(sgid_tbl->vlan, 0, sizeof(u8) * sgid_tbl->max); 534 memset(sgid_tbl->vlan, 0, sizeof(u8) * sgid_tbl->max);
534 sgid_tbl->active = 0; 535 sgid_tbl->active = 0;
@@ -537,7 +538,11 @@ static void bnxt_qplib_cleanup_sgid_tbl(struct bnxt_qplib_res *res,
537static void bnxt_qplib_init_sgid_tbl(struct bnxt_qplib_sgid_tbl *sgid_tbl, 538static void bnxt_qplib_init_sgid_tbl(struct bnxt_qplib_sgid_tbl *sgid_tbl,
538 struct net_device *netdev) 539 struct net_device *netdev)
539{ 540{
540 memset(sgid_tbl->tbl, 0, sizeof(struct bnxt_qplib_gid) * sgid_tbl->max); 541 u32 i;
542
543 for (i = 0; i < sgid_tbl->max; i++)
544 sgid_tbl->tbl[i].vlan_id = 0xffff;
545
541 memset(sgid_tbl->hw_id, -1, sizeof(u16) * sgid_tbl->max); 546 memset(sgid_tbl->hw_id, -1, sizeof(u16) * sgid_tbl->max);
542} 547}
543 548
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_res.h b/drivers/infiniband/hw/bnxt_re/qplib_res.h
index 30c42c92fac7..fbda11a7ab1a 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_res.h
+++ b/drivers/infiniband/hw/bnxt_re/qplib_res.h
@@ -111,7 +111,7 @@ struct bnxt_qplib_pd_tbl {
111}; 111};
112 112
113struct bnxt_qplib_sgid_tbl { 113struct bnxt_qplib_sgid_tbl {
114 struct bnxt_qplib_gid *tbl; 114 struct bnxt_qplib_gid_info *tbl;
115 u16 *hw_id; 115 u16 *hw_id;
116 u16 max; 116 u16 max;
117 u16 active; 117 u16 active;
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_sp.c b/drivers/infiniband/hw/bnxt_re/qplib_sp.c
index 48793d3512ac..40296b97d21e 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_sp.c
+++ b/drivers/infiniband/hw/bnxt_re/qplib_sp.c
@@ -213,12 +213,12 @@ int bnxt_qplib_get_sgid(struct bnxt_qplib_res *res,
213 index, sgid_tbl->max); 213 index, sgid_tbl->max);
214 return -EINVAL; 214 return -EINVAL;
215 } 215 }
216 memcpy(gid, &sgid_tbl->tbl[index], sizeof(*gid)); 216 memcpy(gid, &sgid_tbl->tbl[index].gid, sizeof(*gid));
217 return 0; 217 return 0;
218} 218}
219 219
220int bnxt_qplib_del_sgid(struct bnxt_qplib_sgid_tbl *sgid_tbl, 220int bnxt_qplib_del_sgid(struct bnxt_qplib_sgid_tbl *sgid_tbl,
221 struct bnxt_qplib_gid *gid, bool update) 221 struct bnxt_qplib_gid *gid, u16 vlan_id, bool update)
222{ 222{
223 struct bnxt_qplib_res *res = to_bnxt_qplib(sgid_tbl, 223 struct bnxt_qplib_res *res = to_bnxt_qplib(sgid_tbl,
224 struct bnxt_qplib_res, 224 struct bnxt_qplib_res,
@@ -236,7 +236,8 @@ int bnxt_qplib_del_sgid(struct bnxt_qplib_sgid_tbl *sgid_tbl,
236 return -ENOMEM; 236 return -ENOMEM;
237 } 237 }
238 for (index = 0; index < sgid_tbl->max; index++) { 238 for (index = 0; index < sgid_tbl->max; index++) {
239 if (!memcmp(&sgid_tbl->tbl[index], gid, sizeof(*gid))) 239 if (!memcmp(&sgid_tbl->tbl[index].gid, gid, sizeof(*gid)) &&
240 vlan_id == sgid_tbl->tbl[index].vlan_id)
240 break; 241 break;
241 } 242 }
242 if (index == sgid_tbl->max) { 243 if (index == sgid_tbl->max) {
@@ -262,8 +263,9 @@ int bnxt_qplib_del_sgid(struct bnxt_qplib_sgid_tbl *sgid_tbl,
262 if (rc) 263 if (rc)
263 return rc; 264 return rc;
264 } 265 }
265 memcpy(&sgid_tbl->tbl[index], &bnxt_qplib_gid_zero, 266 memcpy(&sgid_tbl->tbl[index].gid, &bnxt_qplib_gid_zero,
266 sizeof(bnxt_qplib_gid_zero)); 267 sizeof(bnxt_qplib_gid_zero));
268 sgid_tbl->tbl[index].vlan_id = 0xFFFF;
267 sgid_tbl->vlan[index] = 0; 269 sgid_tbl->vlan[index] = 0;
268 sgid_tbl->active--; 270 sgid_tbl->active--;
269 dev_dbg(&res->pdev->dev, 271 dev_dbg(&res->pdev->dev,
@@ -296,7 +298,8 @@ int bnxt_qplib_add_sgid(struct bnxt_qplib_sgid_tbl *sgid_tbl,
296 } 298 }
297 free_idx = sgid_tbl->max; 299 free_idx = sgid_tbl->max;
298 for (i = 0; i < sgid_tbl->max; i++) { 300 for (i = 0; i < sgid_tbl->max; i++) {
299 if (!memcmp(&sgid_tbl->tbl[i], gid, sizeof(*gid))) { 301 if (!memcmp(&sgid_tbl->tbl[i], gid, sizeof(*gid)) &&
302 sgid_tbl->tbl[i].vlan_id == vlan_id) {
300 dev_dbg(&res->pdev->dev, 303 dev_dbg(&res->pdev->dev,
301 "SGID entry already exist in entry %d!\n", i); 304 "SGID entry already exist in entry %d!\n", i);
302 *index = i; 305 *index = i;
@@ -351,6 +354,7 @@ int bnxt_qplib_add_sgid(struct bnxt_qplib_sgid_tbl *sgid_tbl,
351 } 354 }
352 /* Add GID to the sgid_tbl */ 355 /* Add GID to the sgid_tbl */
353 memcpy(&sgid_tbl->tbl[free_idx], gid, sizeof(*gid)); 356 memcpy(&sgid_tbl->tbl[free_idx], gid, sizeof(*gid));
357 sgid_tbl->tbl[free_idx].vlan_id = vlan_id;
354 sgid_tbl->active++; 358 sgid_tbl->active++;
355 if (vlan_id != 0xFFFF) 359 if (vlan_id != 0xFFFF)
356 sgid_tbl->vlan[free_idx] = 1; 360 sgid_tbl->vlan[free_idx] = 1;
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_sp.h b/drivers/infiniband/hw/bnxt_re/qplib_sp.h
index 0ec3b12b0bcd..13d9432d5ce2 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_sp.h
+++ b/drivers/infiniband/hw/bnxt_re/qplib_sp.h
@@ -84,6 +84,11 @@ struct bnxt_qplib_gid {
84 u8 data[16]; 84 u8 data[16];
85}; 85};
86 86
87struct bnxt_qplib_gid_info {
88 struct bnxt_qplib_gid gid;
89 u16 vlan_id;
90};
91
87struct bnxt_qplib_ah { 92struct bnxt_qplib_ah {
88 struct bnxt_qplib_gid dgid; 93 struct bnxt_qplib_gid dgid;
89 struct bnxt_qplib_pd *pd; 94 struct bnxt_qplib_pd *pd;
@@ -221,7 +226,7 @@ int bnxt_qplib_get_sgid(struct bnxt_qplib_res *res,
221 struct bnxt_qplib_sgid_tbl *sgid_tbl, int index, 226 struct bnxt_qplib_sgid_tbl *sgid_tbl, int index,
222 struct bnxt_qplib_gid *gid); 227 struct bnxt_qplib_gid *gid);
223int bnxt_qplib_del_sgid(struct bnxt_qplib_sgid_tbl *sgid_tbl, 228int bnxt_qplib_del_sgid(struct bnxt_qplib_sgid_tbl *sgid_tbl,
224 struct bnxt_qplib_gid *gid, bool update); 229 struct bnxt_qplib_gid *gid, u16 vlan_id, bool update);
225int bnxt_qplib_add_sgid(struct bnxt_qplib_sgid_tbl *sgid_tbl, 230int bnxt_qplib_add_sgid(struct bnxt_qplib_sgid_tbl *sgid_tbl,
226 struct bnxt_qplib_gid *gid, u8 *mac, u16 vlan_id, 231 struct bnxt_qplib_gid *gid, u8 *mac, u16 vlan_id,
227 bool update, u32 *index); 232 bool update, u32 *index);
diff --git a/drivers/infiniband/hw/hfi1/chip.c b/drivers/infiniband/hw/hfi1/chip.c
index d5b643a1d9fd..67052dc3100c 100644
--- a/drivers/infiniband/hw/hfi1/chip.c
+++ b/drivers/infiniband/hw/hfi1/chip.c
@@ -14452,7 +14452,7 @@ void hfi1_deinit_vnic_rsm(struct hfi1_devdata *dd)
14452 clear_rcvctrl(dd, RCV_CTRL_RCV_RSM_ENABLE_SMASK); 14452 clear_rcvctrl(dd, RCV_CTRL_RCV_RSM_ENABLE_SMASK);
14453} 14453}
14454 14454
14455static void init_rxe(struct hfi1_devdata *dd) 14455static int init_rxe(struct hfi1_devdata *dd)
14456{ 14456{
14457 struct rsm_map_table *rmt; 14457 struct rsm_map_table *rmt;
14458 u64 val; 14458 u64 val;
@@ -14461,6 +14461,9 @@ static void init_rxe(struct hfi1_devdata *dd)
14461 write_csr(dd, RCV_ERR_MASK, ~0ull); 14461 write_csr(dd, RCV_ERR_MASK, ~0ull);
14462 14462
14463 rmt = alloc_rsm_map_table(dd); 14463 rmt = alloc_rsm_map_table(dd);
14464 if (!rmt)
14465 return -ENOMEM;
14466
14464 /* set up QOS, including the QPN map table */ 14467 /* set up QOS, including the QPN map table */
14465 init_qos(dd, rmt); 14468 init_qos(dd, rmt);
14466 init_fecn_handling(dd, rmt); 14469 init_fecn_handling(dd, rmt);
@@ -14487,6 +14490,7 @@ static void init_rxe(struct hfi1_devdata *dd)
14487 val |= ((4ull & RCV_BYPASS_HDR_SIZE_MASK) << 14490 val |= ((4ull & RCV_BYPASS_HDR_SIZE_MASK) <<
14488 RCV_BYPASS_HDR_SIZE_SHIFT); 14491 RCV_BYPASS_HDR_SIZE_SHIFT);
14489 write_csr(dd, RCV_BYPASS, val); 14492 write_csr(dd, RCV_BYPASS, val);
14493 return 0;
14490} 14494}
14491 14495
14492static void init_other(struct hfi1_devdata *dd) 14496static void init_other(struct hfi1_devdata *dd)
@@ -15024,7 +15028,10 @@ int hfi1_init_dd(struct hfi1_devdata *dd)
15024 goto bail_cleanup; 15028 goto bail_cleanup;
15025 15029
15026 /* set initial RXE CSRs */ 15030 /* set initial RXE CSRs */
15027 init_rxe(dd); 15031 ret = init_rxe(dd);
15032 if (ret)
15033 goto bail_cleanup;
15034
15028 /* set initial TXE CSRs */ 15035 /* set initial TXE CSRs */
15029 init_txe(dd); 15036 init_txe(dd);
15030 /* set initial non-RXE, non-TXE CSRs */ 15037 /* set initial non-RXE, non-TXE CSRs */
diff --git a/drivers/infiniband/hw/hfi1/fault.c b/drivers/infiniband/hw/hfi1/fault.c
index 93613e5def9b..986c12153e62 100644
--- a/drivers/infiniband/hw/hfi1/fault.c
+++ b/drivers/infiniband/hw/hfi1/fault.c
@@ -141,12 +141,14 @@ static ssize_t fault_opcodes_write(struct file *file, const char __user *buf,
141 if (!data) 141 if (!data)
142 return -ENOMEM; 142 return -ENOMEM;
143 copy = min(len, datalen - 1); 143 copy = min(len, datalen - 1);
144 if (copy_from_user(data, buf, copy)) 144 if (copy_from_user(data, buf, copy)) {
145 return -EFAULT; 145 ret = -EFAULT;
146 goto free_data;
147 }
146 148
147 ret = debugfs_file_get(file->f_path.dentry); 149 ret = debugfs_file_get(file->f_path.dentry);
148 if (unlikely(ret)) 150 if (unlikely(ret))
149 return ret; 151 goto free_data;
150 ptr = data; 152 ptr = data;
151 token = ptr; 153 token = ptr;
152 for (ptr = data; *ptr; ptr = end + 1, token = ptr) { 154 for (ptr = data; *ptr; ptr = end + 1, token = ptr) {
@@ -195,6 +197,7 @@ static ssize_t fault_opcodes_write(struct file *file, const char __user *buf,
195 ret = len; 197 ret = len;
196 198
197 debugfs_file_put(file->f_path.dentry); 199 debugfs_file_put(file->f_path.dentry);
200free_data:
198 kfree(data); 201 kfree(data);
199 return ret; 202 return ret;
200} 203}
@@ -214,7 +217,7 @@ static ssize_t fault_opcodes_read(struct file *file, char __user *buf,
214 return -ENOMEM; 217 return -ENOMEM;
215 ret = debugfs_file_get(file->f_path.dentry); 218 ret = debugfs_file_get(file->f_path.dentry);
216 if (unlikely(ret)) 219 if (unlikely(ret))
217 return ret; 220 goto free_data;
218 bit = find_first_bit(fault->opcodes, bitsize); 221 bit = find_first_bit(fault->opcodes, bitsize);
219 while (bit < bitsize) { 222 while (bit < bitsize) {
220 zero = find_next_zero_bit(fault->opcodes, bitsize, bit); 223 zero = find_next_zero_bit(fault->opcodes, bitsize, bit);
@@ -232,6 +235,7 @@ static ssize_t fault_opcodes_read(struct file *file, char __user *buf,
232 data[size - 1] = '\n'; 235 data[size - 1] = '\n';
233 data[size] = '\0'; 236 data[size] = '\0';
234 ret = simple_read_from_buffer(buf, len, pos, data, size); 237 ret = simple_read_from_buffer(buf, len, pos, data, size);
238free_data:
235 kfree(data); 239 kfree(data);
236 return ret; 240 return ret;
237} 241}
diff --git a/drivers/infiniband/hw/hfi1/rc.c b/drivers/infiniband/hw/hfi1/rc.c
index 0477c14633ab..024a7c2b6124 100644
--- a/drivers/infiniband/hw/hfi1/rc.c
+++ b/drivers/infiniband/hw/hfi1/rc.c
@@ -1835,7 +1835,6 @@ void hfi1_rc_send_complete(struct rvt_qp *qp, struct hfi1_opa_header *opah)
1835 cmp_psn(qp->s_sending_psn, qp->s_sending_hpsn) <= 0) 1835 cmp_psn(qp->s_sending_psn, qp->s_sending_hpsn) <= 0)
1836 break; 1836 break;
1837 trdma_clean_swqe(qp, wqe); 1837 trdma_clean_swqe(qp, wqe);
1838 rvt_qp_wqe_unreserve(qp, wqe);
1839 trace_hfi1_qp_send_completion(qp, wqe, qp->s_last); 1838 trace_hfi1_qp_send_completion(qp, wqe, qp->s_last);
1840 rvt_qp_complete_swqe(qp, 1839 rvt_qp_complete_swqe(qp,
1841 wqe, 1840 wqe,
@@ -1882,7 +1881,6 @@ struct rvt_swqe *do_rc_completion(struct rvt_qp *qp,
1882 if (cmp_psn(wqe->lpsn, qp->s_sending_psn) < 0 || 1881 if (cmp_psn(wqe->lpsn, qp->s_sending_psn) < 0 ||
1883 cmp_psn(qp->s_sending_psn, qp->s_sending_hpsn) > 0) { 1882 cmp_psn(qp->s_sending_psn, qp->s_sending_hpsn) > 0) {
1884 trdma_clean_swqe(qp, wqe); 1883 trdma_clean_swqe(qp, wqe);
1885 rvt_qp_wqe_unreserve(qp, wqe);
1886 trace_hfi1_qp_send_completion(qp, wqe, qp->s_last); 1884 trace_hfi1_qp_send_completion(qp, wqe, qp->s_last);
1887 rvt_qp_complete_swqe(qp, 1885 rvt_qp_complete_swqe(qp,
1888 wqe, 1886 wqe,
diff --git a/drivers/infiniband/hw/hfi1/tid_rdma.c b/drivers/infiniband/hw/hfi1/tid_rdma.c
index 92acccaaaa86..6141f4edc6bf 100644
--- a/drivers/infiniband/hw/hfi1/tid_rdma.c
+++ b/drivers/infiniband/hw/hfi1/tid_rdma.c
@@ -1620,6 +1620,7 @@ static int hfi1_kern_exp_rcv_alloc_flows(struct tid_rdma_request *req,
1620 flows[i].req = req; 1620 flows[i].req = req;
1621 flows[i].npagesets = 0; 1621 flows[i].npagesets = 0;
1622 flows[i].pagesets[0].mapped = 0; 1622 flows[i].pagesets[0].mapped = 0;
1623 flows[i].resync_npkts = 0;
1623 } 1624 }
1624 req->flows = flows; 1625 req->flows = flows;
1625 return 0; 1626 return 0;
@@ -1673,34 +1674,6 @@ static struct tid_rdma_flow *find_flow_ib(struct tid_rdma_request *req,
1673 return NULL; 1674 return NULL;
1674} 1675}
1675 1676
1676static struct tid_rdma_flow *
1677__find_flow_ranged(struct tid_rdma_request *req, u16 head, u16 tail,
1678 u32 psn, u16 *fidx)
1679{
1680 for ( ; CIRC_CNT(head, tail, MAX_FLOWS);
1681 tail = CIRC_NEXT(tail, MAX_FLOWS)) {
1682 struct tid_rdma_flow *flow = &req->flows[tail];
1683 u32 spsn, lpsn;
1684
1685 spsn = full_flow_psn(flow, flow->flow_state.spsn);
1686 lpsn = full_flow_psn(flow, flow->flow_state.lpsn);
1687
1688 if (cmp_psn(psn, spsn) >= 0 && cmp_psn(psn, lpsn) <= 0) {
1689 if (fidx)
1690 *fidx = tail;
1691 return flow;
1692 }
1693 }
1694 return NULL;
1695}
1696
1697static struct tid_rdma_flow *find_flow(struct tid_rdma_request *req,
1698 u32 psn, u16 *fidx)
1699{
1700 return __find_flow_ranged(req, req->setup_head, req->clear_tail, psn,
1701 fidx);
1702}
1703
1704/* TID RDMA READ functions */ 1677/* TID RDMA READ functions */
1705u32 hfi1_build_tid_rdma_read_packet(struct rvt_swqe *wqe, 1678u32 hfi1_build_tid_rdma_read_packet(struct rvt_swqe *wqe,
1706 struct ib_other_headers *ohdr, u32 *bth1, 1679 struct ib_other_headers *ohdr, u32 *bth1,
@@ -2601,18 +2574,9 @@ void hfi1_kern_read_tid_flow_free(struct rvt_qp *qp)
2601 hfi1_kern_clear_hw_flow(priv->rcd, qp); 2574 hfi1_kern_clear_hw_flow(priv->rcd, qp);
2602} 2575}
2603 2576
2604static bool tid_rdma_tid_err(struct hfi1_ctxtdata *rcd, 2577static bool tid_rdma_tid_err(struct hfi1_packet *packet, u8 rcv_type)
2605 struct hfi1_packet *packet, u8 rcv_type,
2606 u8 opcode)
2607{ 2578{
2608 struct rvt_qp *qp = packet->qp; 2579 struct rvt_qp *qp = packet->qp;
2609 struct hfi1_qp_priv *qpriv = qp->priv;
2610 u32 ipsn;
2611 struct ib_other_headers *ohdr = packet->ohdr;
2612 struct rvt_ack_entry *e;
2613 struct tid_rdma_request *req;
2614 struct rvt_dev_info *rdi = ib_to_rvt(qp->ibqp.device);
2615 u32 i;
2616 2580
2617 if (rcv_type >= RHF_RCV_TYPE_IB) 2581 if (rcv_type >= RHF_RCV_TYPE_IB)
2618 goto done; 2582 goto done;
@@ -2629,41 +2593,9 @@ static bool tid_rdma_tid_err(struct hfi1_ctxtdata *rcd,
2629 if (rcv_type == RHF_RCV_TYPE_EAGER) { 2593 if (rcv_type == RHF_RCV_TYPE_EAGER) {
2630 hfi1_restart_rc(qp, qp->s_last_psn + 1, 1); 2594 hfi1_restart_rc(qp, qp->s_last_psn + 1, 1);
2631 hfi1_schedule_send(qp); 2595 hfi1_schedule_send(qp);
2632 goto done_unlock;
2633 }
2634
2635 /*
2636 * For TID READ response, error out QP after freeing the tid
2637 * resources.
2638 */
2639 if (opcode == TID_OP(READ_RESP)) {
2640 ipsn = mask_psn(be32_to_cpu(ohdr->u.tid_rdma.r_rsp.verbs_psn));
2641 if (cmp_psn(ipsn, qp->s_last_psn) > 0 &&
2642 cmp_psn(ipsn, qp->s_psn) < 0) {
2643 hfi1_kern_read_tid_flow_free(qp);
2644 spin_unlock(&qp->s_lock);
2645 rvt_rc_error(qp, IB_WC_LOC_QP_OP_ERR);
2646 goto done;
2647 }
2648 goto done_unlock;
2649 } 2596 }
2650 2597
2651 /* 2598 /* Since no payload is delivered, just drop the packet */
2652 * Error out the qp for TID RDMA WRITE
2653 */
2654 hfi1_kern_clear_hw_flow(qpriv->rcd, qp);
2655 for (i = 0; i < rvt_max_atomic(rdi); i++) {
2656 e = &qp->s_ack_queue[i];
2657 if (e->opcode == TID_OP(WRITE_REQ)) {
2658 req = ack_to_tid_req(e);
2659 hfi1_kern_exp_rcv_clear_all(req);
2660 }
2661 }
2662 spin_unlock(&qp->s_lock);
2663 rvt_rc_error(qp, IB_WC_LOC_LEN_ERR);
2664 goto done;
2665
2666done_unlock:
2667 spin_unlock(&qp->s_lock); 2599 spin_unlock(&qp->s_lock);
2668done: 2600done:
2669 return true; 2601 return true;
@@ -2714,12 +2646,12 @@ static bool handle_read_kdeth_eflags(struct hfi1_ctxtdata *rcd,
2714 u32 fpsn; 2646 u32 fpsn;
2715 2647
2716 lockdep_assert_held(&qp->r_lock); 2648 lockdep_assert_held(&qp->r_lock);
2649 spin_lock(&qp->s_lock);
2717 /* If the psn is out of valid range, drop the packet */ 2650 /* If the psn is out of valid range, drop the packet */
2718 if (cmp_psn(ibpsn, qp->s_last_psn) < 0 || 2651 if (cmp_psn(ibpsn, qp->s_last_psn) < 0 ||
2719 cmp_psn(ibpsn, qp->s_psn) > 0) 2652 cmp_psn(ibpsn, qp->s_psn) > 0)
2720 return ret; 2653 goto s_unlock;
2721 2654
2722 spin_lock(&qp->s_lock);
2723 /* 2655 /*
2724 * Note that NAKs implicitly ACK outstanding SEND and RDMA write 2656 * Note that NAKs implicitly ACK outstanding SEND and RDMA write
2725 * requests and implicitly NAK RDMA read and atomic requests issued 2657 * requests and implicitly NAK RDMA read and atomic requests issued
@@ -2767,9 +2699,12 @@ static bool handle_read_kdeth_eflags(struct hfi1_ctxtdata *rcd,
2767 2699
2768 wqe = do_rc_completion(qp, wqe, ibp); 2700 wqe = do_rc_completion(qp, wqe, ibp);
2769 if (qp->s_acked == qp->s_tail) 2701 if (qp->s_acked == qp->s_tail)
2770 break; 2702 goto s_unlock;
2771 } 2703 }
2772 2704
2705 if (qp->s_acked == qp->s_tail)
2706 goto s_unlock;
2707
2773 /* Handle the eflags for the request */ 2708 /* Handle the eflags for the request */
2774 if (wqe->wr.opcode != IB_WR_TID_RDMA_READ) 2709 if (wqe->wr.opcode != IB_WR_TID_RDMA_READ)
2775 goto s_unlock; 2710 goto s_unlock;
@@ -2788,19 +2723,7 @@ static bool handle_read_kdeth_eflags(struct hfi1_ctxtdata *rcd,
2788 * to prevent continuous Flow Sequence errors for any 2723 * to prevent continuous Flow Sequence errors for any
2789 * packets that could be still in the fabric. 2724 * packets that could be still in the fabric.
2790 */ 2725 */
2791 flow = find_flow(req, psn, NULL); 2726 flow = &req->flows[req->clear_tail];
2792 if (!flow) {
2793 /*
2794 * We can't find the IB PSN matching the
2795 * received KDETH PSN. The only thing we can
2796 * do at this point is report the error to
2797 * the QP.
2798 */
2799 hfi1_kern_read_tid_flow_free(qp);
2800 spin_unlock(&qp->s_lock);
2801 rvt_rc_error(qp, IB_WC_LOC_QP_OP_ERR);
2802 return ret;
2803 }
2804 if (priv->s_flags & HFI1_R_TID_SW_PSN) { 2727 if (priv->s_flags & HFI1_R_TID_SW_PSN) {
2805 diff = cmp_psn(psn, 2728 diff = cmp_psn(psn,
2806 flow->flow_state.r_next_psn); 2729 flow->flow_state.r_next_psn);
@@ -2961,7 +2884,7 @@ bool hfi1_handle_kdeth_eflags(struct hfi1_ctxtdata *rcd,
2961 if (lnh == HFI1_LRH_GRH) 2884 if (lnh == HFI1_LRH_GRH)
2962 goto r_unlock; 2885 goto r_unlock;
2963 2886
2964 if (tid_rdma_tid_err(rcd, packet, rcv_type, opcode)) 2887 if (tid_rdma_tid_err(packet, rcv_type))
2965 goto r_unlock; 2888 goto r_unlock;
2966 } 2889 }
2967 2890
@@ -2981,8 +2904,15 @@ bool hfi1_handle_kdeth_eflags(struct hfi1_ctxtdata *rcd,
2981 */ 2904 */
2982 spin_lock(&qp->s_lock); 2905 spin_lock(&qp->s_lock);
2983 qpriv = qp->priv; 2906 qpriv = qp->priv;
2907 if (qpriv->r_tid_tail == HFI1_QP_WQE_INVALID ||
2908 qpriv->r_tid_tail == qpriv->r_tid_head)
2909 goto unlock;
2984 e = &qp->s_ack_queue[qpriv->r_tid_tail]; 2910 e = &qp->s_ack_queue[qpriv->r_tid_tail];
2911 if (e->opcode != TID_OP(WRITE_REQ))
2912 goto unlock;
2985 req = ack_to_tid_req(e); 2913 req = ack_to_tid_req(e);
2914 if (req->comp_seg == req->cur_seg)
2915 goto unlock;
2986 flow = &req->flows[req->clear_tail]; 2916 flow = &req->flows[req->clear_tail];
2987 trace_hfi1_eflags_err_write(qp, rcv_type, rte, psn); 2917 trace_hfi1_eflags_err_write(qp, rcv_type, rte, psn);
2988 trace_hfi1_rsp_handle_kdeth_eflags(qp, psn); 2918 trace_hfi1_rsp_handle_kdeth_eflags(qp, psn);
@@ -4548,7 +4478,7 @@ void hfi1_rc_rcv_tid_rdma_ack(struct hfi1_packet *packet)
4548 struct rvt_swqe *wqe; 4478 struct rvt_swqe *wqe;
4549 struct tid_rdma_request *req; 4479 struct tid_rdma_request *req;
4550 struct tid_rdma_flow *flow; 4480 struct tid_rdma_flow *flow;
4551 u32 aeth, psn, req_psn, ack_psn, resync_psn, ack_kpsn; 4481 u32 aeth, psn, req_psn, ack_psn, flpsn, resync_psn, ack_kpsn;
4552 unsigned long flags; 4482 unsigned long flags;
4553 u16 fidx; 4483 u16 fidx;
4554 4484
@@ -4577,6 +4507,9 @@ void hfi1_rc_rcv_tid_rdma_ack(struct hfi1_packet *packet)
4577 ack_kpsn--; 4507 ack_kpsn--;
4578 } 4508 }
4579 4509
4510 if (unlikely(qp->s_acked == qp->s_tail))
4511 goto ack_op_err;
4512
4580 wqe = rvt_get_swqe_ptr(qp, qp->s_acked); 4513 wqe = rvt_get_swqe_ptr(qp, qp->s_acked);
4581 4514
4582 if (wqe->wr.opcode != IB_WR_TID_RDMA_WRITE) 4515 if (wqe->wr.opcode != IB_WR_TID_RDMA_WRITE)
@@ -4589,7 +4522,8 @@ void hfi1_rc_rcv_tid_rdma_ack(struct hfi1_packet *packet)
4589 trace_hfi1_tid_flow_rcv_tid_ack(qp, req->acked_tail, flow); 4522 trace_hfi1_tid_flow_rcv_tid_ack(qp, req->acked_tail, flow);
4590 4523
4591 /* Drop stale ACK/NAK */ 4524 /* Drop stale ACK/NAK */
4592 if (cmp_psn(psn, full_flow_psn(flow, flow->flow_state.spsn)) < 0) 4525 if (cmp_psn(psn, full_flow_psn(flow, flow->flow_state.spsn)) < 0 ||
4526 cmp_psn(req_psn, flow->flow_state.resp_ib_psn) < 0)
4593 goto ack_op_err; 4527 goto ack_op_err;
4594 4528
4595 while (cmp_psn(ack_kpsn, 4529 while (cmp_psn(ack_kpsn,
@@ -4751,7 +4685,12 @@ done:
4751 switch ((aeth >> IB_AETH_CREDIT_SHIFT) & 4685 switch ((aeth >> IB_AETH_CREDIT_SHIFT) &
4752 IB_AETH_CREDIT_MASK) { 4686 IB_AETH_CREDIT_MASK) {
4753 case 0: /* PSN sequence error */ 4687 case 0: /* PSN sequence error */
4688 if (!req->flows)
4689 break;
4754 flow = &req->flows[req->acked_tail]; 4690 flow = &req->flows[req->acked_tail];
4691 flpsn = full_flow_psn(flow, flow->flow_state.lpsn);
4692 if (cmp_psn(psn, flpsn) > 0)
4693 break;
4755 trace_hfi1_tid_flow_rcv_tid_ack(qp, req->acked_tail, 4694 trace_hfi1_tid_flow_rcv_tid_ack(qp, req->acked_tail,
4756 flow); 4695 flow);
4757 req->r_ack_psn = mask_psn(be32_to_cpu(ohdr->bth[2])); 4696 req->r_ack_psn = mask_psn(be32_to_cpu(ohdr->bth[2]));
diff --git a/drivers/infiniband/hw/hfi1/verbs.c b/drivers/infiniband/hw/hfi1/verbs.c
index c4b243f50c76..646f61545ed6 100644
--- a/drivers/infiniband/hw/hfi1/verbs.c
+++ b/drivers/infiniband/hw/hfi1/verbs.c
@@ -54,6 +54,7 @@
54#include <linux/mm.h> 54#include <linux/mm.h>
55#include <linux/vmalloc.h> 55#include <linux/vmalloc.h>
56#include <rdma/opa_addr.h> 56#include <rdma/opa_addr.h>
57#include <linux/nospec.h>
57 58
58#include "hfi.h" 59#include "hfi.h"
59#include "common.h" 60#include "common.h"
@@ -1536,6 +1537,7 @@ static int hfi1_check_ah(struct ib_device *ibdev, struct rdma_ah_attr *ah_attr)
1536 sl = rdma_ah_get_sl(ah_attr); 1537 sl = rdma_ah_get_sl(ah_attr);
1537 if (sl >= ARRAY_SIZE(ibp->sl_to_sc)) 1538 if (sl >= ARRAY_SIZE(ibp->sl_to_sc))
1538 return -EINVAL; 1539 return -EINVAL;
1540 sl = array_index_nospec(sl, ARRAY_SIZE(ibp->sl_to_sc));
1539 1541
1540 sc5 = ibp->sl_to_sc[sl]; 1542 sc5 = ibp->sl_to_sc[sl];
1541 if (sc_to_vlt(dd, sc5) > num_vls && sc_to_vlt(dd, sc5) != 0xf) 1543 if (sc_to_vlt(dd, sc5) > num_vls && sc_to_vlt(dd, sc5) != 0xf)
diff --git a/drivers/infiniband/hw/hns/Kconfig b/drivers/infiniband/hw/hns/Kconfig
index 8bf847bcd8d3..54782197c717 100644
--- a/drivers/infiniband/hw/hns/Kconfig
+++ b/drivers/infiniband/hw/hns/Kconfig
@@ -1,6 +1,6 @@
1# SPDX-License-Identifier: GPL-2.0-only 1# SPDX-License-Identifier: GPL-2.0-only
2config INFINIBAND_HNS 2config INFINIBAND_HNS
3 tristate "HNS RoCE Driver" 3 bool "HNS RoCE Driver"
4 depends on NET_VENDOR_HISILICON 4 depends on NET_VENDOR_HISILICON
5 depends on ARM64 || (COMPILE_TEST && 64BIT) 5 depends on ARM64 || (COMPILE_TEST && 64BIT)
6 ---help--- 6 ---help---
@@ -11,7 +11,7 @@ config INFINIBAND_HNS
11 To compile HIP06 or HIP08 driver as module, choose M here. 11 To compile HIP06 or HIP08 driver as module, choose M here.
12 12
13config INFINIBAND_HNS_HIP06 13config INFINIBAND_HNS_HIP06
14 bool "Hisilicon Hip06 Family RoCE support" 14 tristate "Hisilicon Hip06 Family RoCE support"
15 depends on INFINIBAND_HNS && HNS && HNS_DSAF && HNS_ENET 15 depends on INFINIBAND_HNS && HNS && HNS_DSAF && HNS_ENET
16 ---help--- 16 ---help---
17 RoCE driver support for Hisilicon RoCE engine in Hisilicon Hip06 and 17 RoCE driver support for Hisilicon RoCE engine in Hisilicon Hip06 and
@@ -21,7 +21,7 @@ config INFINIBAND_HNS_HIP06
21 module will be called hns-roce-hw-v1 21 module will be called hns-roce-hw-v1
22 22
23config INFINIBAND_HNS_HIP08 23config INFINIBAND_HNS_HIP08
24 bool "Hisilicon Hip08 Family RoCE support" 24 tristate "Hisilicon Hip08 Family RoCE support"
25 depends on INFINIBAND_HNS && PCI && HNS3 25 depends on INFINIBAND_HNS && PCI && HNS3
26 ---help--- 26 ---help---
27 RoCE driver support for Hisilicon RoCE engine in Hisilicon Hip08 SoC. 27 RoCE driver support for Hisilicon RoCE engine in Hisilicon Hip08 SoC.
diff --git a/drivers/infiniband/hw/hns/Makefile b/drivers/infiniband/hw/hns/Makefile
index e105945b94a1..449a2d81319d 100644
--- a/drivers/infiniband/hw/hns/Makefile
+++ b/drivers/infiniband/hw/hns/Makefile
@@ -9,12 +9,8 @@ hns-roce-objs := hns_roce_main.o hns_roce_cmd.o hns_roce_pd.o \
9 hns_roce_ah.o hns_roce_hem.o hns_roce_mr.o hns_roce_qp.o \ 9 hns_roce_ah.o hns_roce_hem.o hns_roce_mr.o hns_roce_qp.o \
10 hns_roce_cq.o hns_roce_alloc.o hns_roce_db.o hns_roce_srq.o hns_roce_restrack.o 10 hns_roce_cq.o hns_roce_alloc.o hns_roce_db.o hns_roce_srq.o hns_roce_restrack.o
11 11
12ifdef CONFIG_INFINIBAND_HNS_HIP06
13hns-roce-hw-v1-objs := hns_roce_hw_v1.o $(hns-roce-objs) 12hns-roce-hw-v1-objs := hns_roce_hw_v1.o $(hns-roce-objs)
14obj-$(CONFIG_INFINIBAND_HNS) += hns-roce-hw-v1.o 13obj-$(CONFIG_INFINIBAND_HNS_HIP06) += hns-roce-hw-v1.o
15endif
16 14
17ifdef CONFIG_INFINIBAND_HNS_HIP08
18hns-roce-hw-v2-objs := hns_roce_hw_v2.o hns_roce_hw_v2_dfx.o $(hns-roce-objs) 15hns-roce-hw-v2-objs := hns_roce_hw_v2.o hns_roce_hw_v2_dfx.o $(hns-roce-objs)
19obj-$(CONFIG_INFINIBAND_HNS) += hns-roce-hw-v2.o 16obj-$(CONFIG_INFINIBAND_HNS_HIP08) += hns-roce-hw-v2.o
20endif
diff --git a/drivers/infiniband/hw/hns/hns_roce_db.c b/drivers/infiniband/hw/hns/hns_roce_db.c
index 627aa46ef683..c00714c2f16a 100644
--- a/drivers/infiniband/hw/hns/hns_roce_db.c
+++ b/drivers/infiniband/hw/hns/hns_roce_db.c
@@ -12,13 +12,15 @@ int hns_roce_db_map_user(struct hns_roce_ucontext *context,
12 struct ib_udata *udata, unsigned long virt, 12 struct ib_udata *udata, unsigned long virt,
13 struct hns_roce_db *db) 13 struct hns_roce_db *db)
14{ 14{
15 unsigned long page_addr = virt & PAGE_MASK;
15 struct hns_roce_user_db_page *page; 16 struct hns_roce_user_db_page *page;
17 unsigned int offset;
16 int ret = 0; 18 int ret = 0;
17 19
18 mutex_lock(&context->page_mutex); 20 mutex_lock(&context->page_mutex);
19 21
20 list_for_each_entry(page, &context->page_list, list) 22 list_for_each_entry(page, &context->page_list, list)
21 if (page->user_virt == (virt & PAGE_MASK)) 23 if (page->user_virt == page_addr)
22 goto found; 24 goto found;
23 25
24 page = kmalloc(sizeof(*page), GFP_KERNEL); 26 page = kmalloc(sizeof(*page), GFP_KERNEL);
@@ -28,8 +30,8 @@ int hns_roce_db_map_user(struct hns_roce_ucontext *context,
28 } 30 }
29 31
30 refcount_set(&page->refcount, 1); 32 refcount_set(&page->refcount, 1);
31 page->user_virt = (virt & PAGE_MASK); 33 page->user_virt = page_addr;
32 page->umem = ib_umem_get(udata, virt & PAGE_MASK, PAGE_SIZE, 0, 0); 34 page->umem = ib_umem_get(udata, page_addr, PAGE_SIZE, 0, 0);
33 if (IS_ERR(page->umem)) { 35 if (IS_ERR(page->umem)) {
34 ret = PTR_ERR(page->umem); 36 ret = PTR_ERR(page->umem);
35 kfree(page); 37 kfree(page);
@@ -39,10 +41,9 @@ int hns_roce_db_map_user(struct hns_roce_ucontext *context,
39 list_add(&page->list, &context->page_list); 41 list_add(&page->list, &context->page_list);
40 42
41found: 43found:
42 db->dma = sg_dma_address(page->umem->sg_head.sgl) + 44 offset = virt - page_addr;
43 (virt & ~PAGE_MASK); 45 db->dma = sg_dma_address(page->umem->sg_head.sgl) + offset;
44 page->umem->sg_head.sgl->offset = virt & ~PAGE_MASK; 46 db->virt_addr = sg_virt(page->umem->sg_head.sgl) + offset;
45 db->virt_addr = sg_virt(page->umem->sg_head.sgl);
46 db->u.user_page = page; 47 db->u.user_page = page;
47 refcount_inc(&page->refcount); 48 refcount_inc(&page->refcount);
48 49
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v1.c b/drivers/infiniband/hw/hns/hns_roce_hw_v1.c
index 81e6dedb1e02..c07e387a07a3 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hw_v1.c
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v1.c
@@ -750,8 +750,10 @@ static int hns_roce_v1_rsv_lp_qp(struct hns_roce_dev *hr_dev)
750 atomic_set(&free_mr->mr_free_cq->ib_cq.usecnt, 0); 750 atomic_set(&free_mr->mr_free_cq->ib_cq.usecnt, 0);
751 751
752 pd = rdma_zalloc_drv_obj(ibdev, ib_pd); 752 pd = rdma_zalloc_drv_obj(ibdev, ib_pd);
753 if (!pd) 753 if (!pd) {
754 ret = -ENOMEM;
754 goto alloc_mem_failed; 755 goto alloc_mem_failed;
756 }
755 757
756 pd->device = ibdev; 758 pd->device = ibdev;
757 ret = hns_roce_alloc_pd(pd, NULL); 759 ret = hns_roce_alloc_pd(pd, NULL);
diff --git a/drivers/infiniband/hw/mlx4/mad.c b/drivers/infiniband/hw/mlx4/mad.c
index 68c951491a08..57079110af9b 100644
--- a/drivers/infiniband/hw/mlx4/mad.c
+++ b/drivers/infiniband/hw/mlx4/mad.c
@@ -1677,8 +1677,6 @@ tx_err:
1677 tx_buf_size, DMA_TO_DEVICE); 1677 tx_buf_size, DMA_TO_DEVICE);
1678 kfree(tun_qp->tx_ring[i].buf.addr); 1678 kfree(tun_qp->tx_ring[i].buf.addr);
1679 } 1679 }
1680 kfree(tun_qp->tx_ring);
1681 tun_qp->tx_ring = NULL;
1682 i = MLX4_NUM_TUNNEL_BUFS; 1680 i = MLX4_NUM_TUNNEL_BUFS;
1683err: 1681err:
1684 while (i > 0) { 1682 while (i > 0) {
@@ -1687,6 +1685,8 @@ err:
1687 rx_buf_size, DMA_FROM_DEVICE); 1685 rx_buf_size, DMA_FROM_DEVICE);
1688 kfree(tun_qp->ring[i].addr); 1686 kfree(tun_qp->ring[i].addr);
1689 } 1687 }
1688 kfree(tun_qp->tx_ring);
1689 tun_qp->tx_ring = NULL;
1690 kfree(tun_qp->ring); 1690 kfree(tun_qp->ring);
1691 tun_qp->ring = NULL; 1691 tun_qp->ring = NULL;
1692 return -ENOMEM; 1692 return -ENOMEM;
diff --git a/drivers/infiniband/hw/mlx5/devx.c b/drivers/infiniband/hw/mlx5/devx.c
index ec4370f99381..af5bbb35c058 100644
--- a/drivers/infiniband/hw/mlx5/devx.c
+++ b/drivers/infiniband/hw/mlx5/devx.c
@@ -2026,7 +2026,7 @@ static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_SUBSCRIBE_EVENT)(
2026 event_sub->eventfd = 2026 event_sub->eventfd =
2027 eventfd_ctx_fdget(redirect_fd); 2027 eventfd_ctx_fdget(redirect_fd);
2028 2028
2029 if (IS_ERR(event_sub)) { 2029 if (IS_ERR(event_sub->eventfd)) {
2030 err = PTR_ERR(event_sub->eventfd); 2030 err = PTR_ERR(event_sub->eventfd);
2031 event_sub->eventfd = NULL; 2031 event_sub->eventfd = NULL;
2032 goto err; 2032 goto err;
@@ -2644,12 +2644,13 @@ static int devx_async_event_close(struct inode *inode, struct file *filp)
2644 struct devx_async_event_file *ev_file = filp->private_data; 2644 struct devx_async_event_file *ev_file = filp->private_data;
2645 struct devx_event_subscription *event_sub, *event_sub_tmp; 2645 struct devx_event_subscription *event_sub, *event_sub_tmp;
2646 struct devx_async_event_data *entry, *tmp; 2646 struct devx_async_event_data *entry, *tmp;
2647 struct mlx5_ib_dev *dev = ev_file->dev;
2647 2648
2648 mutex_lock(&ev_file->dev->devx_event_table.event_xa_lock); 2649 mutex_lock(&dev->devx_event_table.event_xa_lock);
2649 /* delete the subscriptions which are related to this FD */ 2650 /* delete the subscriptions which are related to this FD */
2650 list_for_each_entry_safe(event_sub, event_sub_tmp, 2651 list_for_each_entry_safe(event_sub, event_sub_tmp,
2651 &ev_file->subscribed_events_list, file_list) { 2652 &ev_file->subscribed_events_list, file_list) {
2652 devx_cleanup_subscription(ev_file->dev, event_sub); 2653 devx_cleanup_subscription(dev, event_sub);
2653 if (event_sub->eventfd) 2654 if (event_sub->eventfd)
2654 eventfd_ctx_put(event_sub->eventfd); 2655 eventfd_ctx_put(event_sub->eventfd);
2655 2656
@@ -2658,7 +2659,7 @@ static int devx_async_event_close(struct inode *inode, struct file *filp)
2658 kfree_rcu(event_sub, rcu); 2659 kfree_rcu(event_sub, rcu);
2659 } 2660 }
2660 2661
2661 mutex_unlock(&ev_file->dev->devx_event_table.event_xa_lock); 2662 mutex_unlock(&dev->devx_event_table.event_xa_lock);
2662 2663
2663 /* free the pending events allocation */ 2664 /* free the pending events allocation */
2664 if (!ev_file->omit_data) { 2665 if (!ev_file->omit_data) {
@@ -2670,7 +2671,7 @@ static int devx_async_event_close(struct inode *inode, struct file *filp)
2670 } 2671 }
2671 2672
2672 uverbs_close_fd(filp); 2673 uverbs_close_fd(filp);
2673 put_device(&ev_file->dev->ib_dev.dev); 2674 put_device(&dev->ib_dev.dev);
2674 return 0; 2675 return 0;
2675} 2676}
2676 2677
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
index c2a5780cb394..0569bcab02d4 100644
--- a/drivers/infiniband/hw/mlx5/main.c
+++ b/drivers/infiniband/hw/mlx5/main.c
@@ -1023,7 +1023,7 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
1023 props->timestamp_mask = 0x7FFFFFFFFFFFFFFFULL; 1023 props->timestamp_mask = 0x7FFFFFFFFFFFFFFFULL;
1024 1024
1025 if (IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING)) { 1025 if (IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING)) {
1026 if (MLX5_CAP_GEN(mdev, pg)) 1026 if (dev->odp_caps.general_caps & IB_ODP_SUPPORT)
1027 props->device_cap_flags |= IB_DEVICE_ON_DEMAND_PAGING; 1027 props->device_cap_flags |= IB_DEVICE_ON_DEMAND_PAGING;
1028 props->odp_caps = dev->odp_caps; 1028 props->odp_caps = dev->odp_caps;
1029 } 1029 }
@@ -5802,13 +5802,12 @@ static void mlx5_ib_unbind_slave_port(struct mlx5_ib_dev *ibdev,
5802 return; 5802 return;
5803 } 5803 }
5804 5804
5805 if (mpi->mdev_events.notifier_call)
5806 mlx5_notifier_unregister(mpi->mdev, &mpi->mdev_events);
5807 mpi->mdev_events.notifier_call = NULL;
5808
5809 mpi->ibdev = NULL; 5805 mpi->ibdev = NULL;
5810 5806
5811 spin_unlock(&port->mp.mpi_lock); 5807 spin_unlock(&port->mp.mpi_lock);
5808 if (mpi->mdev_events.notifier_call)
5809 mlx5_notifier_unregister(mpi->mdev, &mpi->mdev_events);
5810 mpi->mdev_events.notifier_call = NULL;
5812 mlx5_remove_netdev_notifier(ibdev, port_num); 5811 mlx5_remove_netdev_notifier(ibdev, port_num);
5813 spin_lock(&port->mp.mpi_lock); 5812 spin_lock(&port->mp.mpi_lock);
5814 5813
@@ -6140,6 +6139,8 @@ static int mlx5_ib_stage_init_init(struct mlx5_ib_dev *dev)
6140 dev->port[i].roce.last_port_state = IB_PORT_DOWN; 6139 dev->port[i].roce.last_port_state = IB_PORT_DOWN;
6141 } 6140 }
6142 6141
6142 mlx5_ib_internal_fill_odp_caps(dev);
6143
6143 err = mlx5_ib_init_multiport_master(dev); 6144 err = mlx5_ib_init_multiport_master(dev);
6144 if (err) 6145 if (err)
6145 return err; 6146 return err;
@@ -6564,8 +6565,6 @@ static void mlx5_ib_stage_dev_res_cleanup(struct mlx5_ib_dev *dev)
6564 6565
6565static int mlx5_ib_stage_odp_init(struct mlx5_ib_dev *dev) 6566static int mlx5_ib_stage_odp_init(struct mlx5_ib_dev *dev)
6566{ 6567{
6567 mlx5_ib_internal_fill_odp_caps(dev);
6568
6569 return mlx5_ib_odp_init_one(dev); 6568 return mlx5_ib_odp_init_one(dev);
6570} 6569}
6571 6570
diff --git a/drivers/infiniband/hw/mlx5/mem.c b/drivers/infiniband/hw/mlx5/mem.c
index fe1a76d8531c..a40e0abf2338 100644
--- a/drivers/infiniband/hw/mlx5/mem.c
+++ b/drivers/infiniband/hw/mlx5/mem.c
@@ -57,9 +57,10 @@ void mlx5_ib_cont_pages(struct ib_umem *umem, u64 addr,
57 int entry; 57 int entry;
58 58
59 if (umem->is_odp) { 59 if (umem->is_odp) {
60 unsigned int page_shift = to_ib_umem_odp(umem)->page_shift; 60 struct ib_umem_odp *odp = to_ib_umem_odp(umem);
61 unsigned int page_shift = odp->page_shift;
61 62
62 *ncont = ib_umem_page_count(umem); 63 *ncont = ib_umem_odp_num_pages(odp);
63 *count = *ncont << (page_shift - PAGE_SHIFT); 64 *count = *ncont << (page_shift - PAGE_SHIFT);
64 *shift = page_shift; 65 *shift = page_shift;
65 if (order) 66 if (order)
diff --git a/drivers/infiniband/hw/mlx5/mlx5_ib.h b/drivers/infiniband/hw/mlx5/mlx5_ib.h
index c482f19958b3..9ae587b74b12 100644
--- a/drivers/infiniband/hw/mlx5/mlx5_ib.h
+++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h
@@ -481,6 +481,7 @@ struct mlx5_umr_wr {
481 u64 length; 481 u64 length;
482 int access_flags; 482 int access_flags;
483 u32 mkey; 483 u32 mkey;
484 u8 ignore_free_state:1;
484}; 485};
485 486
486static inline const struct mlx5_umr_wr *umr_wr(const struct ib_send_wr *wr) 487static inline const struct mlx5_umr_wr *umr_wr(const struct ib_send_wr *wr)
@@ -1474,4 +1475,18 @@ int bfregn_to_uar_index(struct mlx5_ib_dev *dev,
1474 bool dyn_bfreg); 1475 bool dyn_bfreg);
1475 1476
1476int mlx5_ib_qp_set_counter(struct ib_qp *qp, struct rdma_counter *counter); 1477int mlx5_ib_qp_set_counter(struct ib_qp *qp, struct rdma_counter *counter);
1478
1479static inline bool mlx5_ib_can_use_umr(struct mlx5_ib_dev *dev,
1480 bool do_modify_atomic)
1481{
1482 if (MLX5_CAP_GEN(dev->mdev, umr_modify_entity_size_disabled))
1483 return false;
1484
1485 if (do_modify_atomic &&
1486 MLX5_CAP_GEN(dev->mdev, atomic) &&
1487 MLX5_CAP_GEN(dev->mdev, umr_modify_atomic_disabled))
1488 return false;
1489
1490 return true;
1491}
1477#endif /* MLX5_IB_H */ 1492#endif /* MLX5_IB_H */
diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c
index 20ece6e0b2fc..3401f5f6792e 100644
--- a/drivers/infiniband/hw/mlx5/mr.c
+++ b/drivers/infiniband/hw/mlx5/mr.c
@@ -51,22 +51,12 @@ static void clean_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr);
51static void dereg_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr); 51static void dereg_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr);
52static int mr_cache_max_order(struct mlx5_ib_dev *dev); 52static int mr_cache_max_order(struct mlx5_ib_dev *dev);
53static int unreg_umr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr); 53static int unreg_umr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr);
54static bool umr_can_modify_entity_size(struct mlx5_ib_dev *dev)
55{
56 return !MLX5_CAP_GEN(dev->mdev, umr_modify_entity_size_disabled);
57}
58 54
59static bool umr_can_use_indirect_mkey(struct mlx5_ib_dev *dev) 55static bool umr_can_use_indirect_mkey(struct mlx5_ib_dev *dev)
60{ 56{
61 return !MLX5_CAP_GEN(dev->mdev, umr_indirect_mkey_disabled); 57 return !MLX5_CAP_GEN(dev->mdev, umr_indirect_mkey_disabled);
62} 58}
63 59
64static bool use_umr(struct mlx5_ib_dev *dev, int order)
65{
66 return order <= mr_cache_max_order(dev) &&
67 umr_can_modify_entity_size(dev);
68}
69
70static int destroy_mkey(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr) 60static int destroy_mkey(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
71{ 61{
72 int err = mlx5_core_destroy_mkey(dev->mdev, &mr->mmkey); 62 int err = mlx5_core_destroy_mkey(dev->mdev, &mr->mmkey);
@@ -545,13 +535,16 @@ void mlx5_mr_cache_free(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
545 return; 535 return;
546 536
547 c = order2idx(dev, mr->order); 537 c = order2idx(dev, mr->order);
548 if (c < 0 || c >= MAX_MR_CACHE_ENTRIES) { 538 WARN_ON(c < 0 || c >= MAX_MR_CACHE_ENTRIES);
549 mlx5_ib_warn(dev, "order %d, cache index %d\n", mr->order, c);
550 return;
551 }
552 539
553 if (unreg_umr(dev, mr)) 540 if (unreg_umr(dev, mr)) {
541 mr->allocated_from_cache = false;
542 destroy_mkey(dev, mr);
543 ent = &cache->ent[c];
544 if (ent->cur < ent->limit)
545 queue_work(cache->wq, &ent->work);
554 return; 546 return;
547 }
555 548
556 ent = &cache->ent[c]; 549 ent = &cache->ent[c];
557 spin_lock_irq(&ent->lock); 550 spin_lock_irq(&ent->lock);
@@ -1268,7 +1261,7 @@ struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
1268{ 1261{
1269 struct mlx5_ib_dev *dev = to_mdev(pd->device); 1262 struct mlx5_ib_dev *dev = to_mdev(pd->device);
1270 struct mlx5_ib_mr *mr = NULL; 1263 struct mlx5_ib_mr *mr = NULL;
1271 bool populate_mtts = false; 1264 bool use_umr;
1272 struct ib_umem *umem; 1265 struct ib_umem *umem;
1273 int page_shift; 1266 int page_shift;
1274 int npages; 1267 int npages;
@@ -1300,29 +1293,28 @@ struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
1300 if (err < 0) 1293 if (err < 0)
1301 return ERR_PTR(err); 1294 return ERR_PTR(err);
1302 1295
1303 if (use_umr(dev, order)) { 1296 use_umr = mlx5_ib_can_use_umr(dev, true);
1297
1298 if (order <= mr_cache_max_order(dev) && use_umr) {
1304 mr = alloc_mr_from_cache(pd, umem, virt_addr, length, ncont, 1299 mr = alloc_mr_from_cache(pd, umem, virt_addr, length, ncont,
1305 page_shift, order, access_flags); 1300 page_shift, order, access_flags);
1306 if (PTR_ERR(mr) == -EAGAIN) { 1301 if (PTR_ERR(mr) == -EAGAIN) {
1307 mlx5_ib_dbg(dev, "cache empty for order %d\n", order); 1302 mlx5_ib_dbg(dev, "cache empty for order %d\n", order);
1308 mr = NULL; 1303 mr = NULL;
1309 } 1304 }
1310 populate_mtts = false;
1311 } else if (!MLX5_CAP_GEN(dev->mdev, umr_extended_translation_offset)) { 1305 } else if (!MLX5_CAP_GEN(dev->mdev, umr_extended_translation_offset)) {
1312 if (access_flags & IB_ACCESS_ON_DEMAND) { 1306 if (access_flags & IB_ACCESS_ON_DEMAND) {
1313 err = -EINVAL; 1307 err = -EINVAL;
1314 pr_err("Got MR registration for ODP MR > 512MB, not supported for Connect-IB\n"); 1308 pr_err("Got MR registration for ODP MR > 512MB, not supported for Connect-IB\n");
1315 goto error; 1309 goto error;
1316 } 1310 }
1317 populate_mtts = true; 1311 use_umr = false;
1318 } 1312 }
1319 1313
1320 if (!mr) { 1314 if (!mr) {
1321 if (!umr_can_modify_entity_size(dev))
1322 populate_mtts = true;
1323 mutex_lock(&dev->slow_path_mutex); 1315 mutex_lock(&dev->slow_path_mutex);
1324 mr = reg_create(NULL, pd, virt_addr, length, umem, ncont, 1316 mr = reg_create(NULL, pd, virt_addr, length, umem, ncont,
1325 page_shift, access_flags, populate_mtts); 1317 page_shift, access_flags, !use_umr);
1326 mutex_unlock(&dev->slow_path_mutex); 1318 mutex_unlock(&dev->slow_path_mutex);
1327 } 1319 }
1328 1320
@@ -1338,7 +1330,7 @@ struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
1338 1330
1339 update_odp_mr(mr); 1331 update_odp_mr(mr);
1340 1332
1341 if (!populate_mtts) { 1333 if (use_umr) {
1342 int update_xlt_flags = MLX5_IB_UPD_XLT_ENABLE; 1334 int update_xlt_flags = MLX5_IB_UPD_XLT_ENABLE;
1343 1335
1344 if (access_flags & IB_ACCESS_ON_DEMAND) 1336 if (access_flags & IB_ACCESS_ON_DEMAND)
@@ -1373,9 +1365,11 @@ static int unreg_umr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
1373 return 0; 1365 return 0;
1374 1366
1375 umrwr.wr.send_flags = MLX5_IB_SEND_UMR_DISABLE_MR | 1367 umrwr.wr.send_flags = MLX5_IB_SEND_UMR_DISABLE_MR |
1376 MLX5_IB_SEND_UMR_FAIL_IF_FREE; 1368 MLX5_IB_SEND_UMR_UPDATE_PD_ACCESS;
1377 umrwr.wr.opcode = MLX5_IB_WR_UMR; 1369 umrwr.wr.opcode = MLX5_IB_WR_UMR;
1370 umrwr.pd = dev->umrc.pd;
1378 umrwr.mkey = mr->mmkey.key; 1371 umrwr.mkey = mr->mmkey.key;
1372 umrwr.ignore_free_state = 1;
1379 1373
1380 return mlx5_ib_post_send_wait(dev, &umrwr); 1374 return mlx5_ib_post_send_wait(dev, &umrwr);
1381} 1375}
@@ -1452,7 +1446,8 @@ int mlx5_ib_rereg_user_mr(struct ib_mr *ib_mr, int flags, u64 start,
1452 goto err; 1446 goto err;
1453 } 1447 }
1454 1448
1455 if (flags & IB_MR_REREG_TRANS && !use_umr_mtt_update(mr, addr, len)) { 1449 if (!mlx5_ib_can_use_umr(dev, true) ||
1450 (flags & IB_MR_REREG_TRANS && !use_umr_mtt_update(mr, addr, len))) {
1456 /* 1451 /*
1457 * UMR can't be used - MKey needs to be replaced. 1452 * UMR can't be used - MKey needs to be replaced.
1458 */ 1453 */
@@ -1577,10 +1572,10 @@ static void clean_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
1577 mr->sig = NULL; 1572 mr->sig = NULL;
1578 } 1573 }
1579 1574
1580 mlx5_free_priv_descs(mr); 1575 if (!allocated_from_cache) {
1581
1582 if (!allocated_from_cache)
1583 destroy_mkey(dev, mr); 1576 destroy_mkey(dev, mr);
1577 mlx5_free_priv_descs(mr);
1578 }
1584} 1579}
1585 1580
1586static void dereg_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr) 1581static void dereg_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
diff --git a/drivers/infiniband/hw/mlx5/odp.c b/drivers/infiniband/hw/mlx5/odp.c
index 5b642d81e617..0a59912a4cef 100644
--- a/drivers/infiniband/hw/mlx5/odp.c
+++ b/drivers/infiniband/hw/mlx5/odp.c
@@ -246,7 +246,7 @@ void mlx5_ib_invalidate_range(struct ib_umem_odp *umem_odp, unsigned long start,
246 * overwrite the same MTTs. Concurent invalidations might race us, 246 * overwrite the same MTTs. Concurent invalidations might race us,
247 * but they will write 0s as well, so no difference in the end result. 247 * but they will write 0s as well, so no difference in the end result.
248 */ 248 */
249 249 mutex_lock(&umem_odp->umem_mutex);
250 for (addr = start; addr < end; addr += BIT(umem_odp->page_shift)) { 250 for (addr = start; addr < end; addr += BIT(umem_odp->page_shift)) {
251 idx = (addr - ib_umem_start(umem_odp)) >> umem_odp->page_shift; 251 idx = (addr - ib_umem_start(umem_odp)) >> umem_odp->page_shift;
252 /* 252 /*
@@ -278,6 +278,7 @@ void mlx5_ib_invalidate_range(struct ib_umem_odp *umem_odp, unsigned long start,
278 idx - blk_start_idx + 1, 0, 278 idx - blk_start_idx + 1, 0,
279 MLX5_IB_UPD_XLT_ZAP | 279 MLX5_IB_UPD_XLT_ZAP |
280 MLX5_IB_UPD_XLT_ATOMIC); 280 MLX5_IB_UPD_XLT_ATOMIC);
281 mutex_unlock(&umem_odp->umem_mutex);
281 /* 282 /*
282 * We are now sure that the device will not access the 283 * We are now sure that the device will not access the
283 * memory. We can safely unmap it, and mark it as dirty if 284 * memory. We can safely unmap it, and mark it as dirty if
@@ -300,7 +301,8 @@ void mlx5_ib_internal_fill_odp_caps(struct mlx5_ib_dev *dev)
300 301
301 memset(caps, 0, sizeof(*caps)); 302 memset(caps, 0, sizeof(*caps));
302 303
303 if (!MLX5_CAP_GEN(dev->mdev, pg)) 304 if (!MLX5_CAP_GEN(dev->mdev, pg) ||
305 !mlx5_ib_can_use_umr(dev, true))
304 return; 306 return;
305 307
306 caps->general_caps = IB_ODP_SUPPORT; 308 caps->general_caps = IB_ODP_SUPPORT;
@@ -354,7 +356,8 @@ void mlx5_ib_internal_fill_odp_caps(struct mlx5_ib_dev *dev)
354 356
355 if (MLX5_CAP_GEN(dev->mdev, fixed_buffer_size) && 357 if (MLX5_CAP_GEN(dev->mdev, fixed_buffer_size) &&
356 MLX5_CAP_GEN(dev->mdev, null_mkey) && 358 MLX5_CAP_GEN(dev->mdev, null_mkey) &&
357 MLX5_CAP_GEN(dev->mdev, umr_extended_translation_offset)) 359 MLX5_CAP_GEN(dev->mdev, umr_extended_translation_offset) &&
360 !MLX5_CAP_GEN(dev->mdev, umr_indirect_mkey_disabled))
358 caps->general_caps |= IB_ODP_SUPPORT_IMPLICIT; 361 caps->general_caps |= IB_ODP_SUPPORT_IMPLICIT;
359 362
360 return; 363 return;
@@ -578,7 +581,6 @@ static int pagefault_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr,
578 u32 flags) 581 u32 flags)
579{ 582{
580 int npages = 0, current_seq, page_shift, ret, np; 583 int npages = 0, current_seq, page_shift, ret, np;
581 bool implicit = false;
582 struct ib_umem_odp *odp_mr = to_ib_umem_odp(mr->umem); 584 struct ib_umem_odp *odp_mr = to_ib_umem_odp(mr->umem);
583 bool downgrade = flags & MLX5_PF_FLAGS_DOWNGRADE; 585 bool downgrade = flags & MLX5_PF_FLAGS_DOWNGRADE;
584 bool prefetch = flags & MLX5_PF_FLAGS_PREFETCH; 586 bool prefetch = flags & MLX5_PF_FLAGS_PREFETCH;
@@ -593,7 +595,6 @@ static int pagefault_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr,
593 if (IS_ERR(odp)) 595 if (IS_ERR(odp))
594 return PTR_ERR(odp); 596 return PTR_ERR(odp);
595 mr = odp->private; 597 mr = odp->private;
596 implicit = true;
597 } else { 598 } else {
598 odp = odp_mr; 599 odp = odp_mr;
599 } 600 }
@@ -681,19 +682,15 @@ next_mr:
681 682
682out: 683out:
683 if (ret == -EAGAIN) { 684 if (ret == -EAGAIN) {
684 if (implicit || !odp->dying) { 685 unsigned long timeout = msecs_to_jiffies(MMU_NOTIFIER_TIMEOUT);
685 unsigned long timeout = 686
686 msecs_to_jiffies(MMU_NOTIFIER_TIMEOUT); 687 if (!wait_for_completion_timeout(&odp->notifier_completion,
687 688 timeout)) {
688 if (!wait_for_completion_timeout( 689 mlx5_ib_warn(
689 &odp->notifier_completion, 690 dev,
690 timeout)) { 691 "timeout waiting for mmu notifier. seq %d against %d. notifiers_count=%d\n",
691 mlx5_ib_warn(dev, "timeout waiting for mmu notifier. seq %d against %d. notifiers_count=%d\n", 692 current_seq, odp->notifiers_seq,
692 current_seq, odp->notifiers_seq, odp->notifiers_count); 693 odp->notifiers_count);
693 }
694 } else {
695 /* The MR is being killed, kill the QP as well. */
696 ret = -EFAULT;
697 } 694 }
698 } 695 }
699 696
@@ -1627,8 +1624,10 @@ int mlx5_ib_odp_init_one(struct mlx5_ib_dev *dev)
1627{ 1624{
1628 int ret = 0; 1625 int ret = 0;
1629 1626
1630 if (dev->odp_caps.general_caps & IB_ODP_SUPPORT) 1627 if (!(dev->odp_caps.general_caps & IB_ODP_SUPPORT))
1631 ib_set_device_ops(&dev->ib_dev, &mlx5_ib_dev_odp_ops); 1628 return ret;
1629
1630 ib_set_device_ops(&dev->ib_dev, &mlx5_ib_dev_odp_ops);
1632 1631
1633 if (dev->odp_caps.general_caps & IB_ODP_SUPPORT_IMPLICIT) { 1632 if (dev->odp_caps.general_caps & IB_ODP_SUPPORT_IMPLICIT) {
1634 ret = mlx5_cmd_null_mkey(dev->mdev, &dev->null_mkey); 1633 ret = mlx5_cmd_null_mkey(dev->mdev, &dev->null_mkey);
@@ -1638,9 +1637,6 @@ int mlx5_ib_odp_init_one(struct mlx5_ib_dev *dev)
1638 } 1637 }
1639 } 1638 }
1640 1639
1641 if (!MLX5_CAP_GEN(dev->mdev, pg))
1642 return ret;
1643
1644 ret = mlx5_ib_create_pf_eq(dev, &dev->odp_pf_eq); 1640 ret = mlx5_ib_create_pf_eq(dev, &dev->odp_pf_eq);
1645 1641
1646 return ret; 1642 return ret;
@@ -1648,7 +1644,7 @@ int mlx5_ib_odp_init_one(struct mlx5_ib_dev *dev)
1648 1644
1649void mlx5_ib_odp_cleanup_one(struct mlx5_ib_dev *dev) 1645void mlx5_ib_odp_cleanup_one(struct mlx5_ib_dev *dev)
1650{ 1646{
1651 if (!MLX5_CAP_GEN(dev->mdev, pg)) 1647 if (!(dev->odp_caps.general_caps & IB_ODP_SUPPORT))
1652 return; 1648 return;
1653 1649
1654 mlx5_ib_destroy_pf_eq(dev, &dev->odp_pf_eq); 1650 mlx5_ib_destroy_pf_eq(dev, &dev->odp_pf_eq);
@@ -1771,7 +1767,7 @@ static void mlx5_ib_prefetch_mr_work(struct work_struct *work)
1771 1767
1772 num_pending_prefetch_dec(to_mdev(w->pd->device), w->sg_list, 1768 num_pending_prefetch_dec(to_mdev(w->pd->device), w->sg_list,
1773 w->num_sge, 0); 1769 w->num_sge, 0);
1774 kfree(w); 1770 kvfree(w);
1775} 1771}
1776 1772
1777int mlx5_ib_advise_mr_prefetch(struct ib_pd *pd, 1773int mlx5_ib_advise_mr_prefetch(struct ib_pd *pd,
@@ -1813,7 +1809,7 @@ int mlx5_ib_advise_mr_prefetch(struct ib_pd *pd,
1813 if (valid_req) 1809 if (valid_req)
1814 queue_work(system_unbound_wq, &work->work); 1810 queue_work(system_unbound_wq, &work->work);
1815 else 1811 else
1816 kfree(work); 1812 kvfree(work);
1817 1813
1818 srcu_read_unlock(&dev->mr_srcu, srcu_key); 1814 srcu_read_unlock(&dev->mr_srcu, srcu_key);
1819 1815
diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c
index 2a97619ed603..72869ff4a334 100644
--- a/drivers/infiniband/hw/mlx5/qp.c
+++ b/drivers/infiniband/hw/mlx5/qp.c
@@ -1713,7 +1713,6 @@ static int create_rss_raw_qp_tir(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
1713 } 1713 }
1714 1714
1715 MLX5_SET(tirc, tirc, rx_hash_fn, MLX5_RX_HASH_FN_TOEPLITZ); 1715 MLX5_SET(tirc, tirc, rx_hash_fn, MLX5_RX_HASH_FN_TOEPLITZ);
1716 MLX5_SET(tirc, tirc, rx_hash_symmetric, 1);
1717 memcpy(rss_key, ucmd.rx_hash_key, len); 1716 memcpy(rss_key, ucmd.rx_hash_key, len);
1718 break; 1717 break;
1719 } 1718 }
@@ -4163,7 +4162,7 @@ static u64 get_xlt_octo(u64 bytes)
4163 MLX5_IB_UMR_OCTOWORD; 4162 MLX5_IB_UMR_OCTOWORD;
4164} 4163}
4165 4164
4166static __be64 frwr_mkey_mask(void) 4165static __be64 frwr_mkey_mask(bool atomic)
4167{ 4166{
4168 u64 result; 4167 u64 result;
4169 4168
@@ -4176,10 +4175,12 @@ static __be64 frwr_mkey_mask(void)
4176 MLX5_MKEY_MASK_LW | 4175 MLX5_MKEY_MASK_LW |
4177 MLX5_MKEY_MASK_RR | 4176 MLX5_MKEY_MASK_RR |
4178 MLX5_MKEY_MASK_RW | 4177 MLX5_MKEY_MASK_RW |
4179 MLX5_MKEY_MASK_A |
4180 MLX5_MKEY_MASK_SMALL_FENCE | 4178 MLX5_MKEY_MASK_SMALL_FENCE |
4181 MLX5_MKEY_MASK_FREE; 4179 MLX5_MKEY_MASK_FREE;
4182 4180
4181 if (atomic)
4182 result |= MLX5_MKEY_MASK_A;
4183
4183 return cpu_to_be64(result); 4184 return cpu_to_be64(result);
4184} 4185}
4185 4186
@@ -4205,7 +4206,7 @@ static __be64 sig_mkey_mask(void)
4205} 4206}
4206 4207
4207static void set_reg_umr_seg(struct mlx5_wqe_umr_ctrl_seg *umr, 4208static void set_reg_umr_seg(struct mlx5_wqe_umr_ctrl_seg *umr,
4208 struct mlx5_ib_mr *mr, u8 flags) 4209 struct mlx5_ib_mr *mr, u8 flags, bool atomic)
4209{ 4210{
4210 int size = (mr->ndescs + mr->meta_ndescs) * mr->desc_size; 4211 int size = (mr->ndescs + mr->meta_ndescs) * mr->desc_size;
4211 4212
@@ -4213,7 +4214,7 @@ static void set_reg_umr_seg(struct mlx5_wqe_umr_ctrl_seg *umr,
4213 4214
4214 umr->flags = flags; 4215 umr->flags = flags;
4215 umr->xlt_octowords = cpu_to_be16(get_xlt_octo(size)); 4216 umr->xlt_octowords = cpu_to_be16(get_xlt_octo(size));
4216 umr->mkey_mask = frwr_mkey_mask(); 4217 umr->mkey_mask = frwr_mkey_mask(atomic);
4217} 4218}
4218 4219
4219static void set_linv_umr_seg(struct mlx5_wqe_umr_ctrl_seg *umr) 4220static void set_linv_umr_seg(struct mlx5_wqe_umr_ctrl_seg *umr)
@@ -4295,10 +4296,14 @@ static int set_reg_umr_segment(struct mlx5_ib_dev *dev,
4295 4296
4296 memset(umr, 0, sizeof(*umr)); 4297 memset(umr, 0, sizeof(*umr));
4297 4298
4298 if (wr->send_flags & MLX5_IB_SEND_UMR_FAIL_IF_FREE) 4299 if (!umrwr->ignore_free_state) {
4299 umr->flags = MLX5_UMR_CHECK_FREE; /* fail if free */ 4300 if (wr->send_flags & MLX5_IB_SEND_UMR_FAIL_IF_FREE)
4300 else 4301 /* fail if free */
4301 umr->flags = MLX5_UMR_CHECK_NOT_FREE; /* fail if not free */ 4302 umr->flags = MLX5_UMR_CHECK_FREE;
4303 else
4304 /* fail if not free */
4305 umr->flags = MLX5_UMR_CHECK_NOT_FREE;
4306 }
4302 4307
4303 umr->xlt_octowords = cpu_to_be16(get_xlt_octo(umrwr->xlt_size)); 4308 umr->xlt_octowords = cpu_to_be16(get_xlt_octo(umrwr->xlt_size));
4304 if (wr->send_flags & MLX5_IB_SEND_UMR_UPDATE_XLT) { 4309 if (wr->send_flags & MLX5_IB_SEND_UMR_UPDATE_XLT) {
@@ -4808,10 +4813,22 @@ static int set_reg_wr(struct mlx5_ib_qp *qp,
4808{ 4813{
4809 struct mlx5_ib_mr *mr = to_mmr(wr->mr); 4814 struct mlx5_ib_mr *mr = to_mmr(wr->mr);
4810 struct mlx5_ib_pd *pd = to_mpd(qp->ibqp.pd); 4815 struct mlx5_ib_pd *pd = to_mpd(qp->ibqp.pd);
4816 struct mlx5_ib_dev *dev = to_mdev(pd->ibpd.device);
4811 int mr_list_size = (mr->ndescs + mr->meta_ndescs) * mr->desc_size; 4817 int mr_list_size = (mr->ndescs + mr->meta_ndescs) * mr->desc_size;
4812 bool umr_inline = mr_list_size <= MLX5_IB_SQ_UMR_INLINE_THRESHOLD; 4818 bool umr_inline = mr_list_size <= MLX5_IB_SQ_UMR_INLINE_THRESHOLD;
4819 bool atomic = wr->access & IB_ACCESS_REMOTE_ATOMIC;
4813 u8 flags = 0; 4820 u8 flags = 0;
4814 4821
4822 if (!mlx5_ib_can_use_umr(dev, atomic)) {
4823 mlx5_ib_warn(to_mdev(qp->ibqp.device),
4824 "Fast update of %s for MR is disabled\n",
4825 (MLX5_CAP_GEN(dev->mdev,
4826 umr_modify_entity_size_disabled)) ?
4827 "entity size" :
4828 "atomic access");
4829 return -EINVAL;
4830 }
4831
4815 if (unlikely(wr->wr.send_flags & IB_SEND_INLINE)) { 4832 if (unlikely(wr->wr.send_flags & IB_SEND_INLINE)) {
4816 mlx5_ib_warn(to_mdev(qp->ibqp.device), 4833 mlx5_ib_warn(to_mdev(qp->ibqp.device),
4817 "Invalid IB_SEND_INLINE send flag\n"); 4834 "Invalid IB_SEND_INLINE send flag\n");
@@ -4823,7 +4840,7 @@ static int set_reg_wr(struct mlx5_ib_qp *qp,
4823 if (umr_inline) 4840 if (umr_inline)
4824 flags |= MLX5_UMR_INLINE; 4841 flags |= MLX5_UMR_INLINE;
4825 4842
4826 set_reg_umr_seg(*seg, mr, flags); 4843 set_reg_umr_seg(*seg, mr, flags, atomic);
4827 *seg += sizeof(struct mlx5_wqe_umr_ctrl_seg); 4844 *seg += sizeof(struct mlx5_wqe_umr_ctrl_seg);
4828 *size += sizeof(struct mlx5_wqe_umr_ctrl_seg) / 16; 4845 *size += sizeof(struct mlx5_wqe_umr_ctrl_seg) / 16;
4829 handle_post_send_edge(&qp->sq, seg, *size, cur_edge); 4846 handle_post_send_edge(&qp->sq, seg, *size, cur_edge);
diff --git a/drivers/infiniband/hw/qedr/main.c b/drivers/infiniband/hw/qedr/main.c
index 533157a2a3be..f97b3d65b30c 100644
--- a/drivers/infiniband/hw/qedr/main.c
+++ b/drivers/infiniband/hw/qedr/main.c
@@ -125,14 +125,20 @@ static ssize_t hw_rev_show(struct device *device, struct device_attribute *attr,
125 struct qedr_dev *dev = 125 struct qedr_dev *dev =
126 rdma_device_to_drv_device(device, struct qedr_dev, ibdev); 126 rdma_device_to_drv_device(device, struct qedr_dev, ibdev);
127 127
128 return scnprintf(buf, PAGE_SIZE, "0x%x\n", dev->pdev->vendor); 128 return scnprintf(buf, PAGE_SIZE, "0x%x\n", dev->attr.hw_ver);
129} 129}
130static DEVICE_ATTR_RO(hw_rev); 130static DEVICE_ATTR_RO(hw_rev);
131 131
132static ssize_t hca_type_show(struct device *device, 132static ssize_t hca_type_show(struct device *device,
133 struct device_attribute *attr, char *buf) 133 struct device_attribute *attr, char *buf)
134{ 134{
135 return scnprintf(buf, PAGE_SIZE, "%s\n", "HCA_TYPE_TO_SET"); 135 struct qedr_dev *dev =
136 rdma_device_to_drv_device(device, struct qedr_dev, ibdev);
137
138 return scnprintf(buf, PAGE_SIZE, "FastLinQ QL%x %s\n",
139 dev->pdev->device,
140 rdma_protocol_iwarp(&dev->ibdev, 1) ?
141 "iWARP" : "RoCE");
136} 142}
137static DEVICE_ATTR_RO(hca_type); 143static DEVICE_ATTR_RO(hca_type);
138 144
diff --git a/drivers/infiniband/sw/siw/Kconfig b/drivers/infiniband/sw/siw/Kconfig
index dace276aea14..b622fc62f2cd 100644
--- a/drivers/infiniband/sw/siw/Kconfig
+++ b/drivers/infiniband/sw/siw/Kconfig
@@ -1,6 +1,6 @@
1config RDMA_SIW 1config RDMA_SIW
2 tristate "Software RDMA over TCP/IP (iWARP) driver" 2 tristate "Software RDMA over TCP/IP (iWARP) driver"
3 depends on INET && INFINIBAND && LIBCRC32C && 64BIT 3 depends on INET && INFINIBAND && LIBCRC32C
4 select DMA_VIRT_OPS 4 select DMA_VIRT_OPS
5 help 5 help
6 This driver implements the iWARP RDMA transport over 6 This driver implements the iWARP RDMA transport over
diff --git a/drivers/infiniband/sw/siw/siw.h b/drivers/infiniband/sw/siw/siw.h
index 03fd7b2f595f..dba4535494ab 100644
--- a/drivers/infiniband/sw/siw/siw.h
+++ b/drivers/infiniband/sw/siw/siw.h
@@ -138,9 +138,9 @@ struct siw_umem {
138}; 138};
139 139
140struct siw_pble { 140struct siw_pble {
141 u64 addr; /* Address of assigned user buffer */ 141 dma_addr_t addr; /* Address of assigned buffer */
142 u64 size; /* Size of this entry */ 142 unsigned int size; /* Size of this entry */
143 u64 pbl_off; /* Total offset from start of PBL */ 143 unsigned long pbl_off; /* Total offset from start of PBL */
144}; 144};
145 145
146struct siw_pbl { 146struct siw_pbl {
@@ -214,7 +214,7 @@ struct siw_wqe {
214struct siw_cq { 214struct siw_cq {
215 struct ib_cq base_cq; 215 struct ib_cq base_cq;
216 spinlock_t lock; 216 spinlock_t lock;
217 u64 *notify; 217 struct siw_cq_ctrl *notify;
218 struct siw_cqe *queue; 218 struct siw_cqe *queue;
219 u32 cq_put; 219 u32 cq_put;
220 u32 cq_get; 220 u32 cq_get;
@@ -734,7 +734,7 @@ static inline void siw_crc_skb(struct siw_rx_stream *srx, unsigned int len)
734 "MEM[0x%08x] %s: " fmt, mem->stag, __func__, ##__VA_ARGS__) 734 "MEM[0x%08x] %s: " fmt, mem->stag, __func__, ##__VA_ARGS__)
735 735
736#define siw_dbg_cep(cep, fmt, ...) \ 736#define siw_dbg_cep(cep, fmt, ...) \
737 ibdev_dbg(&cep->sdev->base_dev, "CEP[0x%p] %s: " fmt, \ 737 ibdev_dbg(&cep->sdev->base_dev, "CEP[0x%pK] %s: " fmt, \
738 cep, __func__, ##__VA_ARGS__) 738 cep, __func__, ##__VA_ARGS__)
739 739
740void siw_cq_flush(struct siw_cq *cq); 740void siw_cq_flush(struct siw_cq *cq);
diff --git a/drivers/infiniband/sw/siw/siw_cm.c b/drivers/infiniband/sw/siw/siw_cm.c
index a7cde98e73e8..8c1931a57f4a 100644
--- a/drivers/infiniband/sw/siw/siw_cm.c
+++ b/drivers/infiniband/sw/siw/siw_cm.c
@@ -220,13 +220,12 @@ static void siw_put_work(struct siw_cm_work *work)
220static void siw_cep_set_inuse(struct siw_cep *cep) 220static void siw_cep_set_inuse(struct siw_cep *cep)
221{ 221{
222 unsigned long flags; 222 unsigned long flags;
223 int rv;
224retry: 223retry:
225 spin_lock_irqsave(&cep->lock, flags); 224 spin_lock_irqsave(&cep->lock, flags);
226 225
227 if (cep->in_use) { 226 if (cep->in_use) {
228 spin_unlock_irqrestore(&cep->lock, flags); 227 spin_unlock_irqrestore(&cep->lock, flags);
229 rv = wait_event_interruptible(cep->waitq, !cep->in_use); 228 wait_event_interruptible(cep->waitq, !cep->in_use);
230 if (signal_pending(current)) 229 if (signal_pending(current))
231 flush_signals(current); 230 flush_signals(current);
232 goto retry; 231 goto retry;
@@ -356,8 +355,8 @@ static int siw_cm_upcall(struct siw_cep *cep, enum iw_cm_event_type reason,
356 getname_local(cep->sock, &event.local_addr); 355 getname_local(cep->sock, &event.local_addr);
357 getname_peer(cep->sock, &event.remote_addr); 356 getname_peer(cep->sock, &event.remote_addr);
358 } 357 }
359 siw_dbg_cep(cep, "[QP %u]: id 0x%p, reason=%d, status=%d\n", 358 siw_dbg_cep(cep, "[QP %u]: reason=%d, status=%d\n",
360 cep->qp ? qp_id(cep->qp) : -1, id, reason, status); 359 cep->qp ? qp_id(cep->qp) : UINT_MAX, reason, status);
361 360
362 return id->event_handler(id, &event); 361 return id->event_handler(id, &event);
363} 362}
@@ -948,8 +947,6 @@ static void siw_accept_newconn(struct siw_cep *cep)
948 siw_cep_get(new_cep); 947 siw_cep_get(new_cep);
949 new_s->sk->sk_user_data = new_cep; 948 new_s->sk->sk_user_data = new_cep;
950 949
951 siw_dbg_cep(cep, "listen socket 0x%p, new 0x%p\n", s, new_s);
952
953 if (siw_tcp_nagle == false) { 950 if (siw_tcp_nagle == false) {
954 int val = 1; 951 int val = 1;
955 952
@@ -1012,7 +1009,8 @@ static void siw_cm_work_handler(struct work_struct *w)
1012 cep = work->cep; 1009 cep = work->cep;
1013 1010
1014 siw_dbg_cep(cep, "[QP %u]: work type: %d, state %d\n", 1011 siw_dbg_cep(cep, "[QP %u]: work type: %d, state %d\n",
1015 cep->qp ? qp_id(cep->qp) : -1, work->type, cep->state); 1012 cep->qp ? qp_id(cep->qp) : UINT_MAX,
1013 work->type, cep->state);
1016 1014
1017 siw_cep_set_inuse(cep); 1015 siw_cep_set_inuse(cep);
1018 1016
@@ -1146,9 +1144,9 @@ static void siw_cm_work_handler(struct work_struct *w)
1146 } 1144 }
1147 if (release_cep) { 1145 if (release_cep) {
1148 siw_dbg_cep(cep, 1146 siw_dbg_cep(cep,
1149 "release: timer=%s, QP[%u], id 0x%p\n", 1147 "release: timer=%s, QP[%u]\n",
1150 cep->mpa_timer ? "y" : "n", 1148 cep->mpa_timer ? "y" : "n",
1151 cep->qp ? qp_id(cep->qp) : -1, cep->cm_id); 1149 cep->qp ? qp_id(cep->qp) : UINT_MAX);
1152 1150
1153 siw_cancel_mpatimer(cep); 1151 siw_cancel_mpatimer(cep);
1154 1152
@@ -1212,8 +1210,8 @@ int siw_cm_queue_work(struct siw_cep *cep, enum siw_work_type type)
1212 else 1210 else
1213 delay = MPAREP_TIMEOUT; 1211 delay = MPAREP_TIMEOUT;
1214 } 1212 }
1215 siw_dbg_cep(cep, "[QP %u]: work type: %d, work 0x%p, timeout %lu\n", 1213 siw_dbg_cep(cep, "[QP %u]: work type: %d, timeout %lu\n",
1216 cep->qp ? qp_id(cep->qp) : -1, type, work, delay); 1214 cep->qp ? qp_id(cep->qp) : -1, type, delay);
1217 1215
1218 queue_delayed_work(siw_cm_wq, &work->work, delay); 1216 queue_delayed_work(siw_cm_wq, &work->work, delay);
1219 1217
@@ -1377,16 +1375,16 @@ int siw_connect(struct iw_cm_id *id, struct iw_cm_conn_param *params)
1377 } 1375 }
1378 if (v4) 1376 if (v4)
1379 siw_dbg_qp(qp, 1377 siw_dbg_qp(qp,
1380 "id 0x%p, pd_len %d, laddr %pI4 %d, raddr %pI4 %d\n", 1378 "pd_len %d, laddr %pI4 %d, raddr %pI4 %d\n",
1381 id, pd_len, 1379 pd_len,
1382 &((struct sockaddr_in *)(laddr))->sin_addr, 1380 &((struct sockaddr_in *)(laddr))->sin_addr,
1383 ntohs(((struct sockaddr_in *)(laddr))->sin_port), 1381 ntohs(((struct sockaddr_in *)(laddr))->sin_port),
1384 &((struct sockaddr_in *)(raddr))->sin_addr, 1382 &((struct sockaddr_in *)(raddr))->sin_addr,
1385 ntohs(((struct sockaddr_in *)(raddr))->sin_port)); 1383 ntohs(((struct sockaddr_in *)(raddr))->sin_port));
1386 else 1384 else
1387 siw_dbg_qp(qp, 1385 siw_dbg_qp(qp,
1388 "id 0x%p, pd_len %d, laddr %pI6 %d, raddr %pI6 %d\n", 1386 "pd_len %d, laddr %pI6 %d, raddr %pI6 %d\n",
1389 id, pd_len, 1387 pd_len,
1390 &((struct sockaddr_in6 *)(laddr))->sin6_addr, 1388 &((struct sockaddr_in6 *)(laddr))->sin6_addr,
1391 ntohs(((struct sockaddr_in6 *)(laddr))->sin6_port), 1389 ntohs(((struct sockaddr_in6 *)(laddr))->sin6_port),
1392 &((struct sockaddr_in6 *)(raddr))->sin6_addr, 1390 &((struct sockaddr_in6 *)(raddr))->sin6_addr,
@@ -1509,14 +1507,13 @@ int siw_connect(struct iw_cm_id *id, struct iw_cm_conn_param *params)
1509 if (rv >= 0) { 1507 if (rv >= 0) {
1510 rv = siw_cm_queue_work(cep, SIW_CM_WORK_MPATIMEOUT); 1508 rv = siw_cm_queue_work(cep, SIW_CM_WORK_MPATIMEOUT);
1511 if (!rv) { 1509 if (!rv) {
1512 siw_dbg_cep(cep, "id 0x%p, [QP %u]: exit\n", id, 1510 siw_dbg_cep(cep, "[QP %u]: exit\n", qp_id(qp));
1513 qp_id(qp));
1514 siw_cep_set_free(cep); 1511 siw_cep_set_free(cep);
1515 return 0; 1512 return 0;
1516 } 1513 }
1517 } 1514 }
1518error: 1515error:
1519 siw_dbg_qp(qp, "failed: %d\n", rv); 1516 siw_dbg(id->device, "failed: %d\n", rv);
1520 1517
1521 if (cep) { 1518 if (cep) {
1522 siw_socket_disassoc(s); 1519 siw_socket_disassoc(s);
@@ -1541,7 +1538,8 @@ error:
1541 } else if (s) { 1538 } else if (s) {
1542 sock_release(s); 1539 sock_release(s);
1543 } 1540 }
1544 siw_qp_put(qp); 1541 if (qp)
1542 siw_qp_put(qp);
1545 1543
1546 return rv; 1544 return rv;
1547} 1545}
@@ -1581,7 +1579,7 @@ int siw_accept(struct iw_cm_id *id, struct iw_cm_conn_param *params)
1581 siw_cancel_mpatimer(cep); 1579 siw_cancel_mpatimer(cep);
1582 1580
1583 if (cep->state != SIW_EPSTATE_RECVD_MPAREQ) { 1581 if (cep->state != SIW_EPSTATE_RECVD_MPAREQ) {
1584 siw_dbg_cep(cep, "id 0x%p: out of state\n", id); 1582 siw_dbg_cep(cep, "out of state\n");
1585 1583
1586 siw_cep_set_free(cep); 1584 siw_cep_set_free(cep);
1587 siw_cep_put(cep); 1585 siw_cep_put(cep);
@@ -1602,7 +1600,7 @@ int siw_accept(struct iw_cm_id *id, struct iw_cm_conn_param *params)
1602 up_write(&qp->state_lock); 1600 up_write(&qp->state_lock);
1603 goto error; 1601 goto error;
1604 } 1602 }
1605 siw_dbg_cep(cep, "id 0x%p\n", id); 1603 siw_dbg_cep(cep, "[QP %d]\n", params->qpn);
1606 1604
1607 if (try_gso && cep->mpa.hdr.params.bits & MPA_RR_FLAG_GSO_EXP) { 1605 if (try_gso && cep->mpa.hdr.params.bits & MPA_RR_FLAG_GSO_EXP) {
1608 siw_dbg_cep(cep, "peer allows GSO on TX\n"); 1606 siw_dbg_cep(cep, "peer allows GSO on TX\n");
@@ -1612,8 +1610,8 @@ int siw_accept(struct iw_cm_id *id, struct iw_cm_conn_param *params)
1612 params->ird > sdev->attrs.max_ird) { 1610 params->ird > sdev->attrs.max_ird) {
1613 siw_dbg_cep( 1611 siw_dbg_cep(
1614 cep, 1612 cep,
1615 "id 0x%p, [QP %u]: ord %d (max %d), ird %d (max %d)\n", 1613 "[QP %u]: ord %d (max %d), ird %d (max %d)\n",
1616 id, qp_id(qp), params->ord, sdev->attrs.max_ord, 1614 qp_id(qp), params->ord, sdev->attrs.max_ord,
1617 params->ird, sdev->attrs.max_ird); 1615 params->ird, sdev->attrs.max_ird);
1618 rv = -EINVAL; 1616 rv = -EINVAL;
1619 up_write(&qp->state_lock); 1617 up_write(&qp->state_lock);
@@ -1625,8 +1623,8 @@ int siw_accept(struct iw_cm_id *id, struct iw_cm_conn_param *params)
1625 if (params->private_data_len > max_priv_data) { 1623 if (params->private_data_len > max_priv_data) {
1626 siw_dbg_cep( 1624 siw_dbg_cep(
1627 cep, 1625 cep,
1628 "id 0x%p, [QP %u]: private data length: %d (max %d)\n", 1626 "[QP %u]: private data length: %d (max %d)\n",
1629 id, qp_id(qp), params->private_data_len, max_priv_data); 1627 qp_id(qp), params->private_data_len, max_priv_data);
1630 rv = -EINVAL; 1628 rv = -EINVAL;
1631 up_write(&qp->state_lock); 1629 up_write(&qp->state_lock);
1632 goto error; 1630 goto error;
@@ -1680,7 +1678,7 @@ int siw_accept(struct iw_cm_id *id, struct iw_cm_conn_param *params)
1680 qp_attrs.flags = SIW_MPA_CRC; 1678 qp_attrs.flags = SIW_MPA_CRC;
1681 qp_attrs.state = SIW_QP_STATE_RTS; 1679 qp_attrs.state = SIW_QP_STATE_RTS;
1682 1680
1683 siw_dbg_cep(cep, "id 0x%p, [QP%u]: moving to rts\n", id, qp_id(qp)); 1681 siw_dbg_cep(cep, "[QP%u]: moving to rts\n", qp_id(qp));
1684 1682
1685 /* Associate QP with CEP */ 1683 /* Associate QP with CEP */
1686 siw_cep_get(cep); 1684 siw_cep_get(cep);
@@ -1701,8 +1699,8 @@ int siw_accept(struct iw_cm_id *id, struct iw_cm_conn_param *params)
1701 if (rv) 1699 if (rv)
1702 goto error; 1700 goto error;
1703 1701
1704 siw_dbg_cep(cep, "id 0x%p, [QP %u]: send mpa reply, %d byte pdata\n", 1702 siw_dbg_cep(cep, "[QP %u]: send mpa reply, %d byte pdata\n",
1705 id, qp_id(qp), params->private_data_len); 1703 qp_id(qp), params->private_data_len);
1706 1704
1707 rv = siw_send_mpareqrep(cep, params->private_data, 1705 rv = siw_send_mpareqrep(cep, params->private_data,
1708 params->private_data_len); 1706 params->private_data_len);
@@ -1760,14 +1758,14 @@ int siw_reject(struct iw_cm_id *id, const void *pdata, u8 pd_len)
1760 siw_cancel_mpatimer(cep); 1758 siw_cancel_mpatimer(cep);
1761 1759
1762 if (cep->state != SIW_EPSTATE_RECVD_MPAREQ) { 1760 if (cep->state != SIW_EPSTATE_RECVD_MPAREQ) {
1763 siw_dbg_cep(cep, "id 0x%p: out of state\n", id); 1761 siw_dbg_cep(cep, "out of state\n");
1764 1762
1765 siw_cep_set_free(cep); 1763 siw_cep_set_free(cep);
1766 siw_cep_put(cep); /* put last reference */ 1764 siw_cep_put(cep); /* put last reference */
1767 1765
1768 return -ECONNRESET; 1766 return -ECONNRESET;
1769 } 1767 }
1770 siw_dbg_cep(cep, "id 0x%p, cep->state %d, pd_len %d\n", id, cep->state, 1768 siw_dbg_cep(cep, "cep->state %d, pd_len %d\n", cep->state,
1771 pd_len); 1769 pd_len);
1772 1770
1773 if (__mpa_rr_revision(cep->mpa.hdr.params.bits) >= MPA_REVISION_1) { 1771 if (__mpa_rr_revision(cep->mpa.hdr.params.bits) >= MPA_REVISION_1) {
@@ -1805,14 +1803,14 @@ static int siw_listen_address(struct iw_cm_id *id, int backlog,
1805 rv = kernel_setsockopt(s, SOL_SOCKET, SO_REUSEADDR, (char *)&s_val, 1803 rv = kernel_setsockopt(s, SOL_SOCKET, SO_REUSEADDR, (char *)&s_val,
1806 sizeof(s_val)); 1804 sizeof(s_val));
1807 if (rv) { 1805 if (rv) {
1808 siw_dbg(id->device, "id 0x%p: setsockopt error: %d\n", id, rv); 1806 siw_dbg(id->device, "setsockopt error: %d\n", rv);
1809 goto error; 1807 goto error;
1810 } 1808 }
1811 rv = s->ops->bind(s, laddr, addr_family == AF_INET ? 1809 rv = s->ops->bind(s, laddr, addr_family == AF_INET ?
1812 sizeof(struct sockaddr_in) : 1810 sizeof(struct sockaddr_in) :
1813 sizeof(struct sockaddr_in6)); 1811 sizeof(struct sockaddr_in6));
1814 if (rv) { 1812 if (rv) {
1815 siw_dbg(id->device, "id 0x%p: socket bind error: %d\n", id, rv); 1813 siw_dbg(id->device, "socket bind error: %d\n", rv);
1816 goto error; 1814 goto error;
1817 } 1815 }
1818 cep = siw_cep_alloc(sdev); 1816 cep = siw_cep_alloc(sdev);
@@ -1825,13 +1823,13 @@ static int siw_listen_address(struct iw_cm_id *id, int backlog,
1825 rv = siw_cm_alloc_work(cep, backlog); 1823 rv = siw_cm_alloc_work(cep, backlog);
1826 if (rv) { 1824 if (rv) {
1827 siw_dbg(id->device, 1825 siw_dbg(id->device,
1828 "id 0x%p: alloc_work error %d, backlog %d\n", id, 1826 "alloc_work error %d, backlog %d\n",
1829 rv, backlog); 1827 rv, backlog);
1830 goto error; 1828 goto error;
1831 } 1829 }
1832 rv = s->ops->listen(s, backlog); 1830 rv = s->ops->listen(s, backlog);
1833 if (rv) { 1831 if (rv) {
1834 siw_dbg(id->device, "id 0x%p: listen error %d\n", id, rv); 1832 siw_dbg(id->device, "listen error %d\n", rv);
1835 goto error; 1833 goto error;
1836 } 1834 }
1837 cep->cm_id = id; 1835 cep->cm_id = id;
@@ -1915,8 +1913,7 @@ static void siw_drop_listeners(struct iw_cm_id *id)
1915 1913
1916 list_del(p); 1914 list_del(p);
1917 1915
1918 siw_dbg_cep(cep, "id 0x%p: drop cep, state %d\n", id, 1916 siw_dbg_cep(cep, "drop cep, state %d\n", cep->state);
1919 cep->state);
1920 1917
1921 siw_cep_set_inuse(cep); 1918 siw_cep_set_inuse(cep);
1922 1919
@@ -1953,7 +1950,7 @@ int siw_create_listen(struct iw_cm_id *id, int backlog)
1953 struct net_device *dev = to_siw_dev(id->device)->netdev; 1950 struct net_device *dev = to_siw_dev(id->device)->netdev;
1954 int rv = 0, listeners = 0; 1951 int rv = 0, listeners = 0;
1955 1952
1956 siw_dbg(id->device, "id 0x%p: backlog %d\n", id, backlog); 1953 siw_dbg(id->device, "backlog %d\n", backlog);
1957 1954
1958 /* 1955 /*
1959 * For each attached address of the interface, create a 1956 * For each attached address of the interface, create a
@@ -1965,12 +1962,16 @@ int siw_create_listen(struct iw_cm_id *id, int backlog)
1965 struct sockaddr_in s_laddr, *s_raddr; 1962 struct sockaddr_in s_laddr, *s_raddr;
1966 const struct in_ifaddr *ifa; 1963 const struct in_ifaddr *ifa;
1967 1964
1965 if (!in_dev) {
1966 rv = -ENODEV;
1967 goto out;
1968 }
1968 memcpy(&s_laddr, &id->local_addr, sizeof(s_laddr)); 1969 memcpy(&s_laddr, &id->local_addr, sizeof(s_laddr));
1969 s_raddr = (struct sockaddr_in *)&id->remote_addr; 1970 s_raddr = (struct sockaddr_in *)&id->remote_addr;
1970 1971
1971 siw_dbg(id->device, 1972 siw_dbg(id->device,
1972 "id 0x%p: laddr %pI4:%d, raddr %pI4:%d\n", 1973 "laddr %pI4:%d, raddr %pI4:%d\n",
1973 id, &s_laddr.sin_addr, ntohs(s_laddr.sin_port), 1974 &s_laddr.sin_addr, ntohs(s_laddr.sin_port),
1974 &s_raddr->sin_addr, ntohs(s_raddr->sin_port)); 1975 &s_raddr->sin_addr, ntohs(s_raddr->sin_port));
1975 1976
1976 rtnl_lock(); 1977 rtnl_lock();
@@ -1994,22 +1995,27 @@ int siw_create_listen(struct iw_cm_id *id, int backlog)
1994 struct sockaddr_in6 *s_laddr = &to_sockaddr_in6(id->local_addr), 1995 struct sockaddr_in6 *s_laddr = &to_sockaddr_in6(id->local_addr),
1995 *s_raddr = &to_sockaddr_in6(id->remote_addr); 1996 *s_raddr = &to_sockaddr_in6(id->remote_addr);
1996 1997
1998 if (!in6_dev) {
1999 rv = -ENODEV;
2000 goto out;
2001 }
1997 siw_dbg(id->device, 2002 siw_dbg(id->device,
1998 "id 0x%p: laddr %pI6:%d, raddr %pI6:%d\n", 2003 "laddr %pI6:%d, raddr %pI6:%d\n",
1999 id, &s_laddr->sin6_addr, ntohs(s_laddr->sin6_port), 2004 &s_laddr->sin6_addr, ntohs(s_laddr->sin6_port),
2000 &s_raddr->sin6_addr, ntohs(s_raddr->sin6_port)); 2005 &s_raddr->sin6_addr, ntohs(s_raddr->sin6_port));
2001 2006
2002 read_lock_bh(&in6_dev->lock); 2007 rtnl_lock();
2003 list_for_each_entry(ifp, &in6_dev->addr_list, if_list) { 2008 list_for_each_entry(ifp, &in6_dev->addr_list, if_list) {
2004 struct sockaddr_in6 bind_addr; 2009 if (ifp->flags & (IFA_F_TENTATIVE | IFA_F_DEPRECATED))
2005 2010 continue;
2006 if (ipv6_addr_any(&s_laddr->sin6_addr) || 2011 if (ipv6_addr_any(&s_laddr->sin6_addr) ||
2007 ipv6_addr_equal(&s_laddr->sin6_addr, &ifp->addr)) { 2012 ipv6_addr_equal(&s_laddr->sin6_addr, &ifp->addr)) {
2008 bind_addr.sin6_family = AF_INET6; 2013 struct sockaddr_in6 bind_addr = {
2009 bind_addr.sin6_port = s_laddr->sin6_port; 2014 .sin6_family = AF_INET6,
2010 bind_addr.sin6_flowinfo = 0; 2015 .sin6_port = s_laddr->sin6_port,
2011 bind_addr.sin6_addr = ifp->addr; 2016 .sin6_flowinfo = 0,
2012 bind_addr.sin6_scope_id = dev->ifindex; 2017 .sin6_addr = ifp->addr,
2018 .sin6_scope_id = dev->ifindex };
2013 2019
2014 rv = siw_listen_address(id, backlog, 2020 rv = siw_listen_address(id, backlog,
2015 (struct sockaddr *)&bind_addr, 2021 (struct sockaddr *)&bind_addr,
@@ -2018,28 +2024,26 @@ int siw_create_listen(struct iw_cm_id *id, int backlog)
2018 listeners++; 2024 listeners++;
2019 } 2025 }
2020 } 2026 }
2021 read_unlock_bh(&in6_dev->lock); 2027 rtnl_unlock();
2022
2023 in6_dev_put(in6_dev); 2028 in6_dev_put(in6_dev);
2024 } else { 2029 } else {
2025 return -EAFNOSUPPORT; 2030 rv = -EAFNOSUPPORT;
2026 } 2031 }
2032out:
2027 if (listeners) 2033 if (listeners)
2028 rv = 0; 2034 rv = 0;
2029 else if (!rv) 2035 else if (!rv)
2030 rv = -EINVAL; 2036 rv = -EINVAL;
2031 2037
2032 siw_dbg(id->device, "id 0x%p: %s\n", id, rv ? "FAIL" : "OK"); 2038 siw_dbg(id->device, "%s\n", rv ? "FAIL" : "OK");
2033 2039
2034 return rv; 2040 return rv;
2035} 2041}
2036 2042
2037int siw_destroy_listen(struct iw_cm_id *id) 2043int siw_destroy_listen(struct iw_cm_id *id)
2038{ 2044{
2039 siw_dbg(id->device, "id 0x%p\n", id);
2040
2041 if (!id->provider_data) { 2045 if (!id->provider_data) {
2042 siw_dbg(id->device, "id 0x%p: no cep(s)\n", id); 2046 siw_dbg(id->device, "no cep(s)\n");
2043 return 0; 2047 return 0;
2044 } 2048 }
2045 siw_drop_listeners(id); 2049 siw_drop_listeners(id);
diff --git a/drivers/infiniband/sw/siw/siw_cq.c b/drivers/infiniband/sw/siw/siw_cq.c
index e381ae9b7d62..d8db3bee9da7 100644
--- a/drivers/infiniband/sw/siw/siw_cq.c
+++ b/drivers/infiniband/sw/siw/siw_cq.c
@@ -71,9 +71,10 @@ int siw_reap_cqe(struct siw_cq *cq, struct ib_wc *wc)
71 wc->wc_flags = IB_WC_WITH_INVALIDATE; 71 wc->wc_flags = IB_WC_WITH_INVALIDATE;
72 } 72 }
73 wc->qp = cqe->base_qp; 73 wc->qp = cqe->base_qp;
74 siw_dbg_cq(cq, "idx %u, type %d, flags %2x, id 0x%p\n", 74 siw_dbg_cq(cq,
75 "idx %u, type %d, flags %2x, id 0x%pK\n",
75 cq->cq_get % cq->num_cqe, cqe->opcode, 76 cq->cq_get % cq->num_cqe, cqe->opcode,
76 cqe->flags, (void *)cqe->id); 77 cqe->flags, (void *)(uintptr_t)cqe->id);
77 } 78 }
78 WRITE_ONCE(cqe->flags, 0); 79 WRITE_ONCE(cqe->flags, 0);
79 cq->cq_get++; 80 cq->cq_get++;
diff --git a/drivers/infiniband/sw/siw/siw_main.c b/drivers/infiniband/sw/siw/siw_main.c
index f55c4e80aea4..05a92f997f60 100644
--- a/drivers/infiniband/sw/siw/siw_main.c
+++ b/drivers/infiniband/sw/siw/siw_main.c
@@ -160,10 +160,8 @@ static int siw_init_cpulist(void)
160 160
161out_err: 161out_err:
162 siw_cpu_info.num_nodes = 0; 162 siw_cpu_info.num_nodes = 0;
163 while (i) { 163 while (--i >= 0)
164 kfree(siw_cpu_info.tx_valid_cpus[i]); 164 kfree(siw_cpu_info.tx_valid_cpus[i]);
165 siw_cpu_info.tx_valid_cpus[i--] = NULL;
166 }
167 kfree(siw_cpu_info.tx_valid_cpus); 165 kfree(siw_cpu_info.tx_valid_cpus);
168 siw_cpu_info.tx_valid_cpus = NULL; 166 siw_cpu_info.tx_valid_cpus = NULL;
169 167
@@ -612,6 +610,7 @@ static __init int siw_init_module(void)
612 610
613 if (!siw_create_tx_threads()) { 611 if (!siw_create_tx_threads()) {
614 pr_info("siw: Could not start any TX thread\n"); 612 pr_info("siw: Could not start any TX thread\n");
613 rv = -ENOMEM;
615 goto out_error; 614 goto out_error;
616 } 615 }
617 /* 616 /*
diff --git a/drivers/infiniband/sw/siw/siw_mem.c b/drivers/infiniband/sw/siw/siw_mem.c
index 67171c82b0c4..87a56039f0ef 100644
--- a/drivers/infiniband/sw/siw/siw_mem.c
+++ b/drivers/infiniband/sw/siw/siw_mem.c
@@ -197,12 +197,12 @@ int siw_check_mem(struct ib_pd *pd, struct siw_mem *mem, u64 addr,
197 */ 197 */
198 if (addr < mem->va || addr + len > mem->va + mem->len) { 198 if (addr < mem->va || addr + len > mem->va + mem->len) {
199 siw_dbg_pd(pd, "MEM interval len %d\n", len); 199 siw_dbg_pd(pd, "MEM interval len %d\n", len);
200 siw_dbg_pd(pd, "[0x%016llx, 0x%016llx] out of bounds\n", 200 siw_dbg_pd(pd, "[0x%pK, 0x%pK] out of bounds\n",
201 (unsigned long long)addr, 201 (void *)(uintptr_t)addr,
202 (unsigned long long)(addr + len)); 202 (void *)(uintptr_t)(addr + len));
203 siw_dbg_pd(pd, "[0x%016llx, 0x%016llx] STag=0x%08x\n", 203 siw_dbg_pd(pd, "[0x%pK, 0x%pK] STag=0x%08x\n",
204 (unsigned long long)mem->va, 204 (void *)(uintptr_t)mem->va,
205 (unsigned long long)(mem->va + mem->len), 205 (void *)(uintptr_t)(mem->va + mem->len),
206 mem->stag); 206 mem->stag);
207 207
208 return -E_BASE_BOUNDS; 208 return -E_BASE_BOUNDS;
@@ -330,7 +330,7 @@ out:
330 * Optionally, provides remaining len within current element, and 330 * Optionally, provides remaining len within current element, and
331 * current PBL index for later resume at same element. 331 * current PBL index for later resume at same element.
332 */ 332 */
333u64 siw_pbl_get_buffer(struct siw_pbl *pbl, u64 off, int *len, int *idx) 333dma_addr_t siw_pbl_get_buffer(struct siw_pbl *pbl, u64 off, int *len, int *idx)
334{ 334{
335 int i = idx ? *idx : 0; 335 int i = idx ? *idx : 0;
336 336
diff --git a/drivers/infiniband/sw/siw/siw_mem.h b/drivers/infiniband/sw/siw/siw_mem.h
index f43daf280891..db138c8423da 100644
--- a/drivers/infiniband/sw/siw/siw_mem.h
+++ b/drivers/infiniband/sw/siw/siw_mem.h
@@ -9,7 +9,7 @@
9struct siw_umem *siw_umem_get(u64 start, u64 len, bool writable); 9struct siw_umem *siw_umem_get(u64 start, u64 len, bool writable);
10void siw_umem_release(struct siw_umem *umem, bool dirty); 10void siw_umem_release(struct siw_umem *umem, bool dirty);
11struct siw_pbl *siw_pbl_alloc(u32 num_buf); 11struct siw_pbl *siw_pbl_alloc(u32 num_buf);
12u64 siw_pbl_get_buffer(struct siw_pbl *pbl, u64 off, int *len, int *idx); 12dma_addr_t siw_pbl_get_buffer(struct siw_pbl *pbl, u64 off, int *len, int *idx);
13struct siw_mem *siw_mem_id2obj(struct siw_device *sdev, int stag_index); 13struct siw_mem *siw_mem_id2obj(struct siw_device *sdev, int stag_index);
14int siw_mem_add(struct siw_device *sdev, struct siw_mem *m); 14int siw_mem_add(struct siw_device *sdev, struct siw_mem *m);
15int siw_invalidate_stag(struct ib_pd *pd, u32 stag); 15int siw_invalidate_stag(struct ib_pd *pd, u32 stag);
diff --git a/drivers/infiniband/sw/siw/siw_qp.c b/drivers/infiniband/sw/siw/siw_qp.c
index 11383d9f95ef..430314c8abd9 100644
--- a/drivers/infiniband/sw/siw/siw_qp.c
+++ b/drivers/infiniband/sw/siw/siw_qp.c
@@ -220,12 +220,14 @@ static int siw_qp_enable_crc(struct siw_qp *qp)
220{ 220{
221 struct siw_rx_stream *c_rx = &qp->rx_stream; 221 struct siw_rx_stream *c_rx = &qp->rx_stream;
222 struct siw_iwarp_tx *c_tx = &qp->tx_ctx; 222 struct siw_iwarp_tx *c_tx = &qp->tx_ctx;
223 int size = crypto_shash_descsize(siw_crypto_shash) + 223 int size;
224 sizeof(struct shash_desc);
225 224
226 if (siw_crypto_shash == NULL) 225 if (siw_crypto_shash == NULL)
227 return -ENOENT; 226 return -ENOENT;
228 227
228 size = crypto_shash_descsize(siw_crypto_shash) +
229 sizeof(struct shash_desc);
230
229 c_tx->mpa_crc_hd = kzalloc(size, GFP_KERNEL); 231 c_tx->mpa_crc_hd = kzalloc(size, GFP_KERNEL);
230 c_rx->mpa_crc_hd = kzalloc(size, GFP_KERNEL); 232 c_rx->mpa_crc_hd = kzalloc(size, GFP_KERNEL);
231 if (!c_tx->mpa_crc_hd || !c_rx->mpa_crc_hd) { 233 if (!c_tx->mpa_crc_hd || !c_rx->mpa_crc_hd) {
@@ -947,7 +949,7 @@ skip_irq:
947 rv = -EINVAL; 949 rv = -EINVAL;
948 goto out; 950 goto out;
949 } 951 }
950 wqe->sqe.sge[0].laddr = (u64)&wqe->sqe.sge[1]; 952 wqe->sqe.sge[0].laddr = (uintptr_t)&wqe->sqe.sge[1];
951 wqe->sqe.sge[0].lkey = 0; 953 wqe->sqe.sge[0].lkey = 0;
952 wqe->sqe.num_sge = 1; 954 wqe->sqe.num_sge = 1;
953 } 955 }
@@ -1011,18 +1013,24 @@ out:
1011 */ 1013 */
1012static bool siw_cq_notify_now(struct siw_cq *cq, u32 flags) 1014static bool siw_cq_notify_now(struct siw_cq *cq, u32 flags)
1013{ 1015{
1014 u64 cq_notify; 1016 u32 cq_notify;
1015 1017
1016 if (!cq->base_cq.comp_handler) 1018 if (!cq->base_cq.comp_handler)
1017 return false; 1019 return false;
1018 1020
1019 cq_notify = READ_ONCE(*cq->notify); 1021 /* Read application shared notification state */
1022 cq_notify = READ_ONCE(cq->notify->flags);
1020 1023
1021 if ((cq_notify & SIW_NOTIFY_NEXT_COMPLETION) || 1024 if ((cq_notify & SIW_NOTIFY_NEXT_COMPLETION) ||
1022 ((cq_notify & SIW_NOTIFY_SOLICITED) && 1025 ((cq_notify & SIW_NOTIFY_SOLICITED) &&
1023 (flags & SIW_WQE_SOLICITED))) { 1026 (flags & SIW_WQE_SOLICITED))) {
1024 /* dis-arm CQ */ 1027 /*
1025 smp_store_mb(*cq->notify, SIW_NOTIFY_NOT); 1028 * CQ notification is one-shot: Since the
1029 * current CQE causes user notification,
1030 * the CQ gets dis-aremd and must be re-aremd
1031 * by the user for a new notification.
1032 */
1033 WRITE_ONCE(cq->notify->flags, SIW_NOTIFY_NOT);
1026 1034
1027 return true; 1035 return true;
1028 } 1036 }
diff --git a/drivers/infiniband/sw/siw/siw_qp_rx.c b/drivers/infiniband/sw/siw/siw_qp_rx.c
index f87657a11657..c0a887240325 100644
--- a/drivers/infiniband/sw/siw/siw_qp_rx.c
+++ b/drivers/infiniband/sw/siw/siw_qp_rx.c
@@ -38,9 +38,10 @@ static int siw_rx_umem(struct siw_rx_stream *srx, struct siw_umem *umem,
38 38
39 p = siw_get_upage(umem, dest_addr); 39 p = siw_get_upage(umem, dest_addr);
40 if (unlikely(!p)) { 40 if (unlikely(!p)) {
41 pr_warn("siw: %s: [QP %u]: bogus addr: %p, %p\n", 41 pr_warn("siw: %s: [QP %u]: bogus addr: %pK, %pK\n",
42 __func__, qp_id(rx_qp(srx)), 42 __func__, qp_id(rx_qp(srx)),
43 (void *)dest_addr, (void *)umem->fp_addr); 43 (void *)(uintptr_t)dest_addr,
44 (void *)(uintptr_t)umem->fp_addr);
44 /* siw internal error */ 45 /* siw internal error */
45 srx->skb_copied += copied; 46 srx->skb_copied += copied;
46 srx->skb_new -= copied; 47 srx->skb_new -= copied;
@@ -50,7 +51,7 @@ static int siw_rx_umem(struct siw_rx_stream *srx, struct siw_umem *umem,
50 pg_off = dest_addr & ~PAGE_MASK; 51 pg_off = dest_addr & ~PAGE_MASK;
51 bytes = min(len, (int)PAGE_SIZE - pg_off); 52 bytes = min(len, (int)PAGE_SIZE - pg_off);
52 53
53 siw_dbg_qp(rx_qp(srx), "page %p, bytes=%u\n", p, bytes); 54 siw_dbg_qp(rx_qp(srx), "page %pK, bytes=%u\n", p, bytes);
54 55
55 dest = kmap_atomic(p); 56 dest = kmap_atomic(p);
56 rv = skb_copy_bits(srx->skb, srx->skb_offset, dest + pg_off, 57 rv = skb_copy_bits(srx->skb, srx->skb_offset, dest + pg_off,
@@ -104,11 +105,11 @@ static int siw_rx_kva(struct siw_rx_stream *srx, void *kva, int len)
104{ 105{
105 int rv; 106 int rv;
106 107
107 siw_dbg_qp(rx_qp(srx), "kva: 0x%p, len: %u\n", kva, len); 108 siw_dbg_qp(rx_qp(srx), "kva: 0x%pK, len: %u\n", kva, len);
108 109
109 rv = skb_copy_bits(srx->skb, srx->skb_offset, kva, len); 110 rv = skb_copy_bits(srx->skb, srx->skb_offset, kva, len);
110 if (unlikely(rv)) { 111 if (unlikely(rv)) {
111 pr_warn("siw: [QP %u]: %s, len %d, kva 0x%p, rv %d\n", 112 pr_warn("siw: [QP %u]: %s, len %d, kva 0x%pK, rv %d\n",
112 qp_id(rx_qp(srx)), __func__, len, kva, rv); 113 qp_id(rx_qp(srx)), __func__, len, kva, rv);
113 114
114 return rv; 115 return rv;
@@ -132,7 +133,7 @@ static int siw_rx_pbl(struct siw_rx_stream *srx, int *pbl_idx,
132 133
133 while (len) { 134 while (len) {
134 int bytes; 135 int bytes;
135 u64 buf_addr = 136 dma_addr_t buf_addr =
136 siw_pbl_get_buffer(pbl, offset, &bytes, pbl_idx); 137 siw_pbl_get_buffer(pbl, offset, &bytes, pbl_idx);
137 if (!buf_addr) 138 if (!buf_addr)
138 break; 139 break;
@@ -485,8 +486,8 @@ int siw_proc_send(struct siw_qp *qp)
485 mem_p = *mem; 486 mem_p = *mem;
486 if (mem_p->mem_obj == NULL) 487 if (mem_p->mem_obj == NULL)
487 rv = siw_rx_kva(srx, 488 rv = siw_rx_kva(srx,
488 (void *)(sge->laddr + frx->sge_off), 489 (void *)(uintptr_t)(sge->laddr + frx->sge_off),
489 sge_bytes); 490 sge_bytes);
490 else if (!mem_p->is_pbl) 491 else if (!mem_p->is_pbl)
491 rv = siw_rx_umem(srx, mem_p->umem, 492 rv = siw_rx_umem(srx, mem_p->umem,
492 sge->laddr + frx->sge_off, sge_bytes); 493 sge->laddr + frx->sge_off, sge_bytes);
@@ -598,8 +599,8 @@ int siw_proc_write(struct siw_qp *qp)
598 599
599 if (mem->mem_obj == NULL) 600 if (mem->mem_obj == NULL)
600 rv = siw_rx_kva(srx, 601 rv = siw_rx_kva(srx,
601 (void *)(srx->ddp_to + srx->fpdu_part_rcvd), 602 (void *)(uintptr_t)(srx->ddp_to + srx->fpdu_part_rcvd),
602 bytes); 603 bytes);
603 else if (!mem->is_pbl) 604 else if (!mem->is_pbl)
604 rv = siw_rx_umem(srx, mem->umem, 605 rv = siw_rx_umem(srx, mem->umem,
605 srx->ddp_to + srx->fpdu_part_rcvd, bytes); 606 srx->ddp_to + srx->fpdu_part_rcvd, bytes);
@@ -841,8 +842,9 @@ int siw_proc_rresp(struct siw_qp *qp)
841 bytes = min(srx->fpdu_part_rem, srx->skb_new); 842 bytes = min(srx->fpdu_part_rem, srx->skb_new);
842 843
843 if (mem_p->mem_obj == NULL) 844 if (mem_p->mem_obj == NULL)
844 rv = siw_rx_kva(srx, (void *)(sge->laddr + wqe->processed), 845 rv = siw_rx_kva(srx,
845 bytes); 846 (void *)(uintptr_t)(sge->laddr + wqe->processed),
847 bytes);
846 else if (!mem_p->is_pbl) 848 else if (!mem_p->is_pbl)
847 rv = siw_rx_umem(srx, mem_p->umem, sge->laddr + wqe->processed, 849 rv = siw_rx_umem(srx, mem_p->umem, sge->laddr + wqe->processed,
848 bytes); 850 bytes);
diff --git a/drivers/infiniband/sw/siw/siw_qp_tx.c b/drivers/infiniband/sw/siw/siw_qp_tx.c
index 43020d2040fc..438a2917a47c 100644
--- a/drivers/infiniband/sw/siw/siw_qp_tx.c
+++ b/drivers/infiniband/sw/siw/siw_qp_tx.c
@@ -26,7 +26,7 @@ static struct page *siw_get_pblpage(struct siw_mem *mem, u64 addr, int *idx)
26{ 26{
27 struct siw_pbl *pbl = mem->pbl; 27 struct siw_pbl *pbl = mem->pbl;
28 u64 offset = addr - mem->va; 28 u64 offset = addr - mem->va;
29 u64 paddr = siw_pbl_get_buffer(pbl, offset, NULL, idx); 29 dma_addr_t paddr = siw_pbl_get_buffer(pbl, offset, NULL, idx);
30 30
31 if (paddr) 31 if (paddr)
32 return virt_to_page(paddr); 32 return virt_to_page(paddr);
@@ -37,7 +37,7 @@ static struct page *siw_get_pblpage(struct siw_mem *mem, u64 addr, int *idx)
37/* 37/*
38 * Copy short payload at provided destination payload address 38 * Copy short payload at provided destination payload address
39 */ 39 */
40static int siw_try_1seg(struct siw_iwarp_tx *c_tx, u64 paddr) 40static int siw_try_1seg(struct siw_iwarp_tx *c_tx, void *paddr)
41{ 41{
42 struct siw_wqe *wqe = &c_tx->wqe_active; 42 struct siw_wqe *wqe = &c_tx->wqe_active;
43 struct siw_sge *sge = &wqe->sqe.sge[0]; 43 struct siw_sge *sge = &wqe->sqe.sge[0];
@@ -50,16 +50,16 @@ static int siw_try_1seg(struct siw_iwarp_tx *c_tx, u64 paddr)
50 return 0; 50 return 0;
51 51
52 if (tx_flags(wqe) & SIW_WQE_INLINE) { 52 if (tx_flags(wqe) & SIW_WQE_INLINE) {
53 memcpy((void *)paddr, &wqe->sqe.sge[1], bytes); 53 memcpy(paddr, &wqe->sqe.sge[1], bytes);
54 } else { 54 } else {
55 struct siw_mem *mem = wqe->mem[0]; 55 struct siw_mem *mem = wqe->mem[0];
56 56
57 if (!mem->mem_obj) { 57 if (!mem->mem_obj) {
58 /* Kernel client using kva */ 58 /* Kernel client using kva */
59 memcpy((void *)paddr, (void *)sge->laddr, bytes); 59 memcpy(paddr,
60 (const void *)(uintptr_t)sge->laddr, bytes);
60 } else if (c_tx->in_syscall) { 61 } else if (c_tx->in_syscall) {
61 if (copy_from_user((void *)paddr, 62 if (copy_from_user(paddr, u64_to_user_ptr(sge->laddr),
62 (const void __user *)sge->laddr,
63 bytes)) 63 bytes))
64 return -EFAULT; 64 return -EFAULT;
65 } else { 65 } else {
@@ -79,12 +79,12 @@ static int siw_try_1seg(struct siw_iwarp_tx *c_tx, u64 paddr)
79 buffer = kmap_atomic(p); 79 buffer = kmap_atomic(p);
80 80
81 if (likely(PAGE_SIZE - off >= bytes)) { 81 if (likely(PAGE_SIZE - off >= bytes)) {
82 memcpy((void *)paddr, buffer + off, bytes); 82 memcpy(paddr, buffer + off, bytes);
83 kunmap_atomic(buffer); 83 kunmap_atomic(buffer);
84 } else { 84 } else {
85 unsigned long part = bytes - (PAGE_SIZE - off); 85 unsigned long part = bytes - (PAGE_SIZE - off);
86 86
87 memcpy((void *)paddr, buffer + off, part); 87 memcpy(paddr, buffer + off, part);
88 kunmap_atomic(buffer); 88 kunmap_atomic(buffer);
89 89
90 if (!mem->is_pbl) 90 if (!mem->is_pbl)
@@ -98,7 +98,7 @@ static int siw_try_1seg(struct siw_iwarp_tx *c_tx, u64 paddr)
98 return -EFAULT; 98 return -EFAULT;
99 99
100 buffer = kmap_atomic(p); 100 buffer = kmap_atomic(p);
101 memcpy((void *)(paddr + part), buffer, 101 memcpy(paddr + part, buffer,
102 bytes - part); 102 bytes - part);
103 kunmap_atomic(buffer); 103 kunmap_atomic(buffer);
104 } 104 }
@@ -166,7 +166,7 @@ static int siw_qp_prepare_tx(struct siw_iwarp_tx *c_tx)
166 c_tx->ctrl_len = sizeof(struct iwarp_send); 166 c_tx->ctrl_len = sizeof(struct iwarp_send);
167 167
168 crc = (char *)&c_tx->pkt.send_pkt.crc; 168 crc = (char *)&c_tx->pkt.send_pkt.crc;
169 data = siw_try_1seg(c_tx, (u64)crc); 169 data = siw_try_1seg(c_tx, crc);
170 break; 170 break;
171 171
172 case SIW_OP_SEND_REMOTE_INV: 172 case SIW_OP_SEND_REMOTE_INV:
@@ -189,7 +189,7 @@ static int siw_qp_prepare_tx(struct siw_iwarp_tx *c_tx)
189 c_tx->ctrl_len = sizeof(struct iwarp_send_inv); 189 c_tx->ctrl_len = sizeof(struct iwarp_send_inv);
190 190
191 crc = (char *)&c_tx->pkt.send_pkt.crc; 191 crc = (char *)&c_tx->pkt.send_pkt.crc;
192 data = siw_try_1seg(c_tx, (u64)crc); 192 data = siw_try_1seg(c_tx, crc);
193 break; 193 break;
194 194
195 case SIW_OP_WRITE: 195 case SIW_OP_WRITE:
@@ -201,7 +201,7 @@ static int siw_qp_prepare_tx(struct siw_iwarp_tx *c_tx)
201 c_tx->ctrl_len = sizeof(struct iwarp_rdma_write); 201 c_tx->ctrl_len = sizeof(struct iwarp_rdma_write);
202 202
203 crc = (char *)&c_tx->pkt.write_pkt.crc; 203 crc = (char *)&c_tx->pkt.write_pkt.crc;
204 data = siw_try_1seg(c_tx, (u64)crc); 204 data = siw_try_1seg(c_tx, crc);
205 break; 205 break;
206 206
207 case SIW_OP_READ_RESPONSE: 207 case SIW_OP_READ_RESPONSE:
@@ -216,7 +216,7 @@ static int siw_qp_prepare_tx(struct siw_iwarp_tx *c_tx)
216 c_tx->ctrl_len = sizeof(struct iwarp_rdma_rresp); 216 c_tx->ctrl_len = sizeof(struct iwarp_rdma_rresp);
217 217
218 crc = (char *)&c_tx->pkt.write_pkt.crc; 218 crc = (char *)&c_tx->pkt.write_pkt.crc;
219 data = siw_try_1seg(c_tx, (u64)crc); 219 data = siw_try_1seg(c_tx, crc);
220 break; 220 break;
221 221
222 default: 222 default:
@@ -398,15 +398,13 @@ static int siw_0copy_tx(struct socket *s, struct page **page,
398 398
399#define MAX_TRAILER (MPA_CRC_SIZE + 4) 399#define MAX_TRAILER (MPA_CRC_SIZE + 4)
400 400
401static void siw_unmap_pages(struct page **pages, int hdr_len, int num_maps) 401static void siw_unmap_pages(struct page **pp, unsigned long kmap_mask)
402{ 402{
403 if (hdr_len) { 403 while (kmap_mask) {
404 ++pages; 404 if (kmap_mask & BIT(0))
405 --num_maps; 405 kunmap(*pp);
406 } 406 pp++;
407 while (num_maps-- > 0) { 407 kmap_mask >>= 1;
408 kunmap(*pages);
409 pages++;
410 } 408 }
411} 409}
412 410
@@ -437,6 +435,7 @@ static int siw_tx_hdt(struct siw_iwarp_tx *c_tx, struct socket *s)
437 unsigned int data_len = c_tx->bytes_unsent, hdr_len = 0, trl_len = 0, 435 unsigned int data_len = c_tx->bytes_unsent, hdr_len = 0, trl_len = 0,
438 sge_off = c_tx->sge_off, sge_idx = c_tx->sge_idx, 436 sge_off = c_tx->sge_off, sge_idx = c_tx->sge_idx,
439 pbl_idx = c_tx->pbl_idx; 437 pbl_idx = c_tx->pbl_idx;
438 unsigned long kmap_mask = 0L;
440 439
441 if (c_tx->state == SIW_SEND_HDR) { 440 if (c_tx->state == SIW_SEND_HDR) {
442 if (c_tx->use_sendpage) { 441 if (c_tx->use_sendpage) {
@@ -463,8 +462,7 @@ static int siw_tx_hdt(struct siw_iwarp_tx *c_tx, struct socket *s)
463 462
464 if (!(tx_flags(wqe) & SIW_WQE_INLINE)) { 463 if (!(tx_flags(wqe) & SIW_WQE_INLINE)) {
465 mem = wqe->mem[sge_idx]; 464 mem = wqe->mem[sge_idx];
466 if (!mem->mem_obj) 465 is_kva = mem->mem_obj == NULL ? 1 : 0;
467 is_kva = 1;
468 } else { 466 } else {
469 is_kva = 1; 467 is_kva = 1;
470 } 468 }
@@ -473,7 +471,8 @@ static int siw_tx_hdt(struct siw_iwarp_tx *c_tx, struct socket *s)
473 * tx from kernel virtual address: either inline data 471 * tx from kernel virtual address: either inline data
474 * or memory region with assigned kernel buffer 472 * or memory region with assigned kernel buffer
475 */ 473 */
476 iov[seg].iov_base = (void *)(sge->laddr + sge_off); 474 iov[seg].iov_base =
475 (void *)(uintptr_t)(sge->laddr + sge_off);
477 iov[seg].iov_len = sge_len; 476 iov[seg].iov_len = sge_len;
478 477
479 if (do_crc) 478 if (do_crc)
@@ -500,12 +499,7 @@ static int siw_tx_hdt(struct siw_iwarp_tx *c_tx, struct socket *s)
500 p = siw_get_upage(mem->umem, 499 p = siw_get_upage(mem->umem,
501 sge->laddr + sge_off); 500 sge->laddr + sge_off);
502 if (unlikely(!p)) { 501 if (unlikely(!p)) {
503 if (hdr_len) 502 siw_unmap_pages(page_array, kmap_mask);
504 seg--;
505 if (!c_tx->use_sendpage && seg) {
506 siw_unmap_pages(page_array,
507 hdr_len, seg);
508 }
509 wqe->processed -= c_tx->bytes_unsent; 503 wqe->processed -= c_tx->bytes_unsent;
510 rv = -EFAULT; 504 rv = -EFAULT;
511 goto done_crc; 505 goto done_crc;
@@ -515,6 +509,10 @@ static int siw_tx_hdt(struct siw_iwarp_tx *c_tx, struct socket *s)
515 if (!c_tx->use_sendpage) { 509 if (!c_tx->use_sendpage) {
516 iov[seg].iov_base = kmap(p) + fp_off; 510 iov[seg].iov_base = kmap(p) + fp_off;
517 iov[seg].iov_len = plen; 511 iov[seg].iov_len = plen;
512
513 /* Remember for later kunmap() */
514 kmap_mask |= BIT(seg);
515
518 if (do_crc) 516 if (do_crc)
519 crypto_shash_update( 517 crypto_shash_update(
520 c_tx->mpa_crc_hd, 518 c_tx->mpa_crc_hd,
@@ -526,13 +524,13 @@ static int siw_tx_hdt(struct siw_iwarp_tx *c_tx, struct socket *s)
526 page_address(p) + fp_off, 524 page_address(p) + fp_off,
527 plen); 525 plen);
528 } else { 526 } else {
529 u64 pa = ((sge->laddr + sge_off) & PAGE_MASK); 527 u64 va = sge->laddr + sge_off;
530 528
531 page_array[seg] = virt_to_page(pa); 529 page_array[seg] = virt_to_page(va & PAGE_MASK);
532 if (do_crc) 530 if (do_crc)
533 crypto_shash_update( 531 crypto_shash_update(
534 c_tx->mpa_crc_hd, 532 c_tx->mpa_crc_hd,
535 (void *)(sge->laddr + sge_off), 533 (void *)(uintptr_t)va,
536 plen); 534 plen);
537 } 535 }
538 536
@@ -543,10 +541,7 @@ static int siw_tx_hdt(struct siw_iwarp_tx *c_tx, struct socket *s)
543 541
544 if (++seg > (int)MAX_ARRAY) { 542 if (++seg > (int)MAX_ARRAY) {
545 siw_dbg_qp(tx_qp(c_tx), "to many fragments\n"); 543 siw_dbg_qp(tx_qp(c_tx), "to many fragments\n");
546 if (!is_kva && !c_tx->use_sendpage) { 544 siw_unmap_pages(page_array, kmap_mask);
547 siw_unmap_pages(page_array, hdr_len,
548 seg - 1);
549 }
550 wqe->processed -= c_tx->bytes_unsent; 545 wqe->processed -= c_tx->bytes_unsent;
551 rv = -EMSGSIZE; 546 rv = -EMSGSIZE;
552 goto done_crc; 547 goto done_crc;
@@ -597,8 +592,7 @@ sge_done:
597 } else { 592 } else {
598 rv = kernel_sendmsg(s, &msg, iov, seg + 1, 593 rv = kernel_sendmsg(s, &msg, iov, seg + 1,
599 hdr_len + data_len + trl_len); 594 hdr_len + data_len + trl_len);
600 if (!is_kva) 595 siw_unmap_pages(page_array, kmap_mask);
601 siw_unmap_pages(page_array, hdr_len, seg);
602 } 596 }
603 if (rv < (int)hdr_len) { 597 if (rv < (int)hdr_len) {
604 /* Not even complete hdr pushed or negative rv */ 598 /* Not even complete hdr pushed or negative rv */
@@ -829,7 +823,8 @@ static int siw_qp_sq_proc_tx(struct siw_qp *qp, struct siw_wqe *wqe)
829 rv = -EINVAL; 823 rv = -EINVAL;
830 goto tx_error; 824 goto tx_error;
831 } 825 }
832 wqe->sqe.sge[0].laddr = (u64)&wqe->sqe.sge[1]; 826 wqe->sqe.sge[0].laddr =
827 (u64)(uintptr_t)&wqe->sqe.sge[1];
833 } 828 }
834 } 829 }
835 wqe->wr_status = SIW_WR_INPROGRESS; 830 wqe->wr_status = SIW_WR_INPROGRESS;
@@ -924,7 +919,7 @@ tx_error:
924 919
925static int siw_fastreg_mr(struct ib_pd *pd, struct siw_sqe *sqe) 920static int siw_fastreg_mr(struct ib_pd *pd, struct siw_sqe *sqe)
926{ 921{
927 struct ib_mr *base_mr = (struct ib_mr *)sqe->base_mr; 922 struct ib_mr *base_mr = (struct ib_mr *)(uintptr_t)sqe->base_mr;
928 struct siw_device *sdev = to_siw_dev(pd->device); 923 struct siw_device *sdev = to_siw_dev(pd->device);
929 struct siw_mem *mem = siw_mem_id2obj(sdev, sqe->rkey >> 8); 924 struct siw_mem *mem = siw_mem_id2obj(sdev, sqe->rkey >> 8);
930 int rv = 0; 925 int rv = 0;
@@ -954,8 +949,7 @@ static int siw_fastreg_mr(struct ib_pd *pd, struct siw_sqe *sqe)
954 mem->stag = sqe->rkey; 949 mem->stag = sqe->rkey;
955 mem->perms = sqe->access; 950 mem->perms = sqe->access;
956 951
957 siw_dbg_mem(mem, "STag now valid, MR va: 0x%016llx -> 0x%016llx\n", 952 siw_dbg_mem(mem, "STag 0x%08x now valid\n", sqe->rkey);
958 mem->va, base_mr->iova);
959 mem->va = base_mr->iova; 953 mem->va = base_mr->iova;
960 mem->stag_valid = 1; 954 mem->stag_valid = 1;
961out: 955out:
diff --git a/drivers/infiniband/sw/siw/siw_verbs.c b/drivers/infiniband/sw/siw/siw_verbs.c
index 32dc79d0e898..da52c90e06d4 100644
--- a/drivers/infiniband/sw/siw/siw_verbs.c
+++ b/drivers/infiniband/sw/siw/siw_verbs.c
@@ -424,8 +424,7 @@ struct ib_qp *siw_create_qp(struct ib_pd *pd,
424 */ 424 */
425 qp->srq = to_siw_srq(attrs->srq); 425 qp->srq = to_siw_srq(attrs->srq);
426 qp->attrs.rq_size = 0; 426 qp->attrs.rq_size = 0;
427 siw_dbg(base_dev, "QP [%u]: [SRQ 0x%p] attached\n", 427 siw_dbg(base_dev, "QP [%u]: SRQ attached\n", qp->qp_num);
428 qp->qp_num, qp->srq);
429 } else if (num_rqe) { 428 } else if (num_rqe) {
430 if (qp->kernel_verbs) 429 if (qp->kernel_verbs)
431 qp->recvq = vzalloc(num_rqe * sizeof(struct siw_rqe)); 430 qp->recvq = vzalloc(num_rqe * sizeof(struct siw_rqe));
@@ -610,7 +609,7 @@ int siw_destroy_qp(struct ib_qp *base_qp, struct ib_udata *udata)
610 base_ucontext); 609 base_ucontext);
611 struct siw_qp_attrs qp_attrs; 610 struct siw_qp_attrs qp_attrs;
612 611
613 siw_dbg_qp(qp, "state %d, cep 0x%p\n", qp->attrs.state, qp->cep); 612 siw_dbg_qp(qp, "state %d\n", qp->attrs.state);
614 613
615 /* 614 /*
616 * Mark QP as in process of destruction to prevent from 615 * Mark QP as in process of destruction to prevent from
@@ -662,7 +661,7 @@ static int siw_copy_inline_sgl(const struct ib_send_wr *core_wr,
662 void *kbuf = &sqe->sge[1]; 661 void *kbuf = &sqe->sge[1];
663 int num_sge = core_wr->num_sge, bytes = 0; 662 int num_sge = core_wr->num_sge, bytes = 0;
664 663
665 sqe->sge[0].laddr = (u64)kbuf; 664 sqe->sge[0].laddr = (uintptr_t)kbuf;
666 sqe->sge[0].lkey = 0; 665 sqe->sge[0].lkey = 0;
667 666
668 while (num_sge--) { 667 while (num_sge--) {
@@ -825,7 +824,7 @@ int siw_post_send(struct ib_qp *base_qp, const struct ib_send_wr *wr,
825 break; 824 break;
826 825
827 case IB_WR_REG_MR: 826 case IB_WR_REG_MR:
828 sqe->base_mr = (uint64_t)reg_wr(wr)->mr; 827 sqe->base_mr = (uintptr_t)reg_wr(wr)->mr;
829 sqe->rkey = reg_wr(wr)->key; 828 sqe->rkey = reg_wr(wr)->key;
830 sqe->access = reg_wr(wr)->access & IWARP_ACCESS_MASK; 829 sqe->access = reg_wr(wr)->access & IWARP_ACCESS_MASK;
831 sqe->opcode = SIW_OP_REG_MR; 830 sqe->opcode = SIW_OP_REG_MR;
@@ -842,8 +841,9 @@ int siw_post_send(struct ib_qp *base_qp, const struct ib_send_wr *wr,
842 rv = -EINVAL; 841 rv = -EINVAL;
843 break; 842 break;
844 } 843 }
845 siw_dbg_qp(qp, "opcode %d, flags 0x%x, wr_id 0x%p\n", 844 siw_dbg_qp(qp, "opcode %d, flags 0x%x, wr_id 0x%pK\n",
846 sqe->opcode, sqe->flags, (void *)sqe->id); 845 sqe->opcode, sqe->flags,
846 (void *)(uintptr_t)sqe->id);
847 847
848 if (unlikely(rv < 0)) 848 if (unlikely(rv < 0))
849 break; 849 break;
@@ -1049,7 +1049,7 @@ int siw_create_cq(struct ib_cq *base_cq, const struct ib_cq_init_attr *attr,
1049 1049
1050 spin_lock_init(&cq->lock); 1050 spin_lock_init(&cq->lock);
1051 1051
1052 cq->notify = &((struct siw_cq_ctrl *)&cq->queue[size])->notify; 1052 cq->notify = (struct siw_cq_ctrl *)&cq->queue[size];
1053 1053
1054 if (udata) { 1054 if (udata) {
1055 struct siw_uresp_create_cq uresp = {}; 1055 struct siw_uresp_create_cq uresp = {};
@@ -1141,11 +1141,17 @@ int siw_req_notify_cq(struct ib_cq *base_cq, enum ib_cq_notify_flags flags)
1141 siw_dbg_cq(cq, "flags: 0x%02x\n", flags); 1141 siw_dbg_cq(cq, "flags: 0x%02x\n", flags);
1142 1142
1143 if ((flags & IB_CQ_SOLICITED_MASK) == IB_CQ_SOLICITED) 1143 if ((flags & IB_CQ_SOLICITED_MASK) == IB_CQ_SOLICITED)
1144 /* CQ event for next solicited completion */ 1144 /*
1145 smp_store_mb(*cq->notify, SIW_NOTIFY_SOLICITED); 1145 * Enable CQ event for next solicited completion.
1146 * and make it visible to all associated producers.
1147 */
1148 smp_store_mb(cq->notify->flags, SIW_NOTIFY_SOLICITED);
1146 else 1149 else
1147 /* CQ event for any signalled completion */ 1150 /*
1148 smp_store_mb(*cq->notify, SIW_NOTIFY_ALL); 1151 * Enable CQ event for any signalled completion.
1152 * and make it visible to all associated producers.
1153 */
1154 smp_store_mb(cq->notify->flags, SIW_NOTIFY_ALL);
1149 1155
1150 if (flags & IB_CQ_REPORT_MISSED_EVENTS) 1156 if (flags & IB_CQ_REPORT_MISSED_EVENTS)
1151 return cq->cq_put - cq->cq_get; 1157 return cq->cq_put - cq->cq_get;
@@ -1199,8 +1205,8 @@ struct ib_mr *siw_reg_user_mr(struct ib_pd *pd, u64 start, u64 len,
1199 unsigned long mem_limit = rlimit(RLIMIT_MEMLOCK); 1205 unsigned long mem_limit = rlimit(RLIMIT_MEMLOCK);
1200 int rv; 1206 int rv;
1201 1207
1202 siw_dbg_pd(pd, "start: 0x%016llx, va: 0x%016llx, len: %llu\n", 1208 siw_dbg_pd(pd, "start: 0x%pK, va: 0x%pK, len: %llu\n",
1203 (unsigned long long)start, (unsigned long long)rnic_va, 1209 (void *)(uintptr_t)start, (void *)(uintptr_t)rnic_va,
1204 (unsigned long long)len); 1210 (unsigned long long)len);
1205 1211
1206 if (atomic_inc_return(&sdev->num_mr) > SIW_MAX_MR) { 1212 if (atomic_inc_return(&sdev->num_mr) > SIW_MAX_MR) {
@@ -1357,7 +1363,7 @@ int siw_map_mr_sg(struct ib_mr *base_mr, struct scatterlist *sl, int num_sle,
1357 struct siw_mem *mem = mr->mem; 1363 struct siw_mem *mem = mr->mem;
1358 struct siw_pbl *pbl = mem->pbl; 1364 struct siw_pbl *pbl = mem->pbl;
1359 struct siw_pble *pble; 1365 struct siw_pble *pble;
1360 u64 pbl_size; 1366 unsigned long pbl_size;
1361 int i, rv; 1367 int i, rv;
1362 1368
1363 if (!pbl) { 1369 if (!pbl) {
@@ -1396,16 +1402,18 @@ int siw_map_mr_sg(struct ib_mr *base_mr, struct scatterlist *sl, int num_sle,
1396 pbl_size += sg_dma_len(slp); 1402 pbl_size += sg_dma_len(slp);
1397 } 1403 }
1398 siw_dbg_mem(mem, 1404 siw_dbg_mem(mem,
1399 "sge[%d], size %llu, addr 0x%016llx, total %llu\n", 1405 "sge[%d], size %u, addr 0x%p, total %lu\n",
1400 i, pble->size, pble->addr, pbl_size); 1406 i, pble->size, (void *)(uintptr_t)pble->addr,
1407 pbl_size);
1401 } 1408 }
1402 rv = ib_sg_to_pages(base_mr, sl, num_sle, sg_off, siw_set_pbl_page); 1409 rv = ib_sg_to_pages(base_mr, sl, num_sle, sg_off, siw_set_pbl_page);
1403 if (rv > 0) { 1410 if (rv > 0) {
1404 mem->len = base_mr->length; 1411 mem->len = base_mr->length;
1405 mem->va = base_mr->iova; 1412 mem->va = base_mr->iova;
1406 siw_dbg_mem(mem, 1413 siw_dbg_mem(mem,
1407 "%llu bytes, start 0x%016llx, %u SLE to %u entries\n", 1414 "%llu bytes, start 0x%pK, %u SLE to %u entries\n",
1408 mem->len, mem->va, num_sle, pbl->num_buf); 1415 mem->len, (void *)(uintptr_t)mem->va, num_sle,
1416 pbl->num_buf);
1409 } 1417 }
1410 return rv; 1418 return rv;
1411} 1419}
@@ -1523,7 +1531,7 @@ int siw_create_srq(struct ib_srq *base_srq,
1523 } 1531 }
1524 spin_lock_init(&srq->lock); 1532 spin_lock_init(&srq->lock);
1525 1533
1526 siw_dbg_pd(base_srq->pd, "[SRQ 0x%p]: success\n", srq); 1534 siw_dbg_pd(base_srq->pd, "[SRQ]: success\n");
1527 1535
1528 return 0; 1536 return 0;
1529 1537
@@ -1644,8 +1652,7 @@ int siw_post_srq_recv(struct ib_srq *base_srq, const struct ib_recv_wr *wr,
1644 1652
1645 if (unlikely(!srq->kernel_verbs)) { 1653 if (unlikely(!srq->kernel_verbs)) {
1646 siw_dbg_pd(base_srq->pd, 1654 siw_dbg_pd(base_srq->pd,
1647 "[SRQ 0x%p]: no kernel post_recv for mapped srq\n", 1655 "[SRQ]: no kernel post_recv for mapped srq\n");
1648 srq);
1649 rv = -EINVAL; 1656 rv = -EINVAL;
1650 goto out; 1657 goto out;
1651 } 1658 }
@@ -1667,8 +1674,7 @@ int siw_post_srq_recv(struct ib_srq *base_srq, const struct ib_recv_wr *wr,
1667 } 1674 }
1668 if (unlikely(wr->num_sge > srq->max_sge)) { 1675 if (unlikely(wr->num_sge > srq->max_sge)) {
1669 siw_dbg_pd(base_srq->pd, 1676 siw_dbg_pd(base_srq->pd,
1670 "[SRQ 0x%p]: too many sge's: %d\n", srq, 1677 "[SRQ]: too many sge's: %d\n", wr->num_sge);
1671 wr->num_sge);
1672 rv = -EINVAL; 1678 rv = -EINVAL;
1673 break; 1679 break;
1674 } 1680 }
@@ -1687,7 +1693,7 @@ int siw_post_srq_recv(struct ib_srq *base_srq, const struct ib_recv_wr *wr,
1687 spin_unlock_irqrestore(&srq->lock, flags); 1693 spin_unlock_irqrestore(&srq->lock, flags);
1688out: 1694out:
1689 if (unlikely(rv < 0)) { 1695 if (unlikely(rv < 0)) {
1690 siw_dbg_pd(base_srq->pd, "[SRQ 0x%p]: error %d\n", srq, rv); 1696 siw_dbg_pd(base_srq->pd, "[SRQ]: error %d\n", rv);
1691 *bad_wr = wr; 1697 *bad_wr = wr;
1692 } 1698 }
1693 return rv; 1699 return rv;
diff --git a/drivers/input/joystick/iforce/iforce-usb.c b/drivers/input/joystick/iforce/iforce-usb.c
index 29abfeeef9a5..6c554c11a7ac 100644
--- a/drivers/input/joystick/iforce/iforce-usb.c
+++ b/drivers/input/joystick/iforce/iforce-usb.c
@@ -201,7 +201,12 @@ static int iforce_usb_probe(struct usb_interface *intf,
201 return -ENODEV; 201 return -ENODEV;
202 202
203 epirq = &interface->endpoint[0].desc; 203 epirq = &interface->endpoint[0].desc;
204 if (!usb_endpoint_is_int_in(epirq))
205 return -ENODEV;
206
204 epout = &interface->endpoint[1].desc; 207 epout = &interface->endpoint[1].desc;
208 if (!usb_endpoint_is_int_out(epout))
209 return -ENODEV;
205 210
206 iforce_usb = kzalloc(sizeof(*iforce_usb), GFP_KERNEL); 211 iforce_usb = kzalloc(sizeof(*iforce_usb), GFP_KERNEL);
207 if (!iforce_usb) 212 if (!iforce_usb)
diff --git a/drivers/input/keyboard/Kconfig b/drivers/input/keyboard/Kconfig
index 8e9c3ea9d5e7..90e8a7f2f07c 100644
--- a/drivers/input/keyboard/Kconfig
+++ b/drivers/input/keyboard/Kconfig
@@ -76,6 +76,8 @@ config KEYBOARD_APPLESPI
76 depends on ACPI && EFI 76 depends on ACPI && EFI
77 depends on SPI 77 depends on SPI
78 depends on X86 || COMPILE_TEST 78 depends on X86 || COMPILE_TEST
79 depends on LEDS_CLASS
80 select CRC16
79 help 81 help
80 Say Y here if you are running Linux on any Apple MacBook8,1 or later, 82 Say Y here if you are running Linux on any Apple MacBook8,1 or later,
81 or any MacBookPro13,* or MacBookPro14,*. 83 or any MacBookPro13,* or MacBookPro14,*.
diff --git a/drivers/input/keyboard/applespi.c b/drivers/input/keyboard/applespi.c
index 548737e7aeda..584289b67fb3 100644
--- a/drivers/input/keyboard/applespi.c
+++ b/drivers/input/keyboard/applespi.c
@@ -134,10 +134,10 @@ struct keyboard_protocol {
134 * struct tp_finger - single trackpad finger structure, le16-aligned 134 * struct tp_finger - single trackpad finger structure, le16-aligned
135 * 135 *
136 * @origin: zero when switching track finger 136 * @origin: zero when switching track finger
137 * @abs_x: absolute x coodinate 137 * @abs_x: absolute x coordinate
138 * @abs_y: absolute y coodinate 138 * @abs_y: absolute y coordinate
139 * @rel_x: relative x coodinate 139 * @rel_x: relative x coordinate
140 * @rel_y: relative y coodinate 140 * @rel_y: relative y coordinate
141 * @tool_major: tool area, major axis 141 * @tool_major: tool area, major axis
142 * @tool_minor: tool area, minor axis 142 * @tool_minor: tool area, minor axis
143 * @orientation: 16384 when point, else 15 bit angle 143 * @orientation: 16384 when point, else 15 bit angle
@@ -944,10 +944,14 @@ static inline int le16_to_int(__le16 x)
944static void applespi_debug_update_dimensions(struct applespi_data *applespi, 944static void applespi_debug_update_dimensions(struct applespi_data *applespi,
945 const struct tp_finger *f) 945 const struct tp_finger *f)
946{ 946{
947 applespi->tp_dim_min_x = min_t(int, applespi->tp_dim_min_x, f->abs_x); 947 applespi->tp_dim_min_x = min(applespi->tp_dim_min_x,
948 applespi->tp_dim_max_x = max_t(int, applespi->tp_dim_max_x, f->abs_x); 948 le16_to_int(f->abs_x));
949 applespi->tp_dim_min_y = min_t(int, applespi->tp_dim_min_y, f->abs_y); 949 applespi->tp_dim_max_x = max(applespi->tp_dim_max_x,
950 applespi->tp_dim_max_y = max_t(int, applespi->tp_dim_max_y, f->abs_y); 950 le16_to_int(f->abs_x));
951 applespi->tp_dim_min_y = min(applespi->tp_dim_min_y,
952 le16_to_int(f->abs_y));
953 applespi->tp_dim_max_y = max(applespi->tp_dim_max_y,
954 le16_to_int(f->abs_y));
951} 955}
952 956
953static int applespi_tp_dim_open(struct inode *inode, struct file *file) 957static int applespi_tp_dim_open(struct inode *inode, struct file *file)
@@ -1490,8 +1494,7 @@ static void applespi_got_data(struct applespi_data *applespi)
1490 size_t tp_len; 1494 size_t tp_len;
1491 1495
1492 tp = &message->touchpad; 1496 tp = &message->touchpad;
1493 tp_len = sizeof(*tp) + 1497 tp_len = struct_size(tp, fingers, tp->number_of_fingers);
1494 tp->number_of_fingers * sizeof(tp->fingers[0]);
1495 1498
1496 if (le16_to_cpu(message->length) + 2 != tp_len) { 1499 if (le16_to_cpu(message->length) + 2 != tp_len) {
1497 dev_warn_ratelimited(&applespi->spi->dev, 1500 dev_warn_ratelimited(&applespi->spi->dev,
@@ -1611,8 +1614,8 @@ static void applespi_save_bl_level(struct applespi_data *applespi,
1611 efi_attr = EFI_VARIABLE_NON_VOLATILE | EFI_VARIABLE_BOOTSERVICE_ACCESS | 1614 efi_attr = EFI_VARIABLE_NON_VOLATILE | EFI_VARIABLE_BOOTSERVICE_ACCESS |
1612 EFI_VARIABLE_RUNTIME_ACCESS; 1615 EFI_VARIABLE_RUNTIME_ACCESS;
1613 1616
1614 sts = efivar_entry_set_safe(EFI_BL_LEVEL_NAME, efi_guid, efi_attr, true, 1617 sts = efivar_entry_set_safe((efi_char16_t *)EFI_BL_LEVEL_NAME, efi_guid,
1615 efi_data_len, &efi_data); 1618 efi_attr, true, efi_data_len, &efi_data);
1616 if (sts) 1619 if (sts)
1617 dev_warn(&applespi->spi->dev, 1620 dev_warn(&applespi->spi->dev,
1618 "Error saving backlight level to EFI vars: %d\n", sts); 1621 "Error saving backlight level to EFI vars: %d\n", sts);
@@ -1953,7 +1956,7 @@ static const struct acpi_device_id applespi_acpi_match[] = {
1953}; 1956};
1954MODULE_DEVICE_TABLE(acpi, applespi_acpi_match); 1957MODULE_DEVICE_TABLE(acpi, applespi_acpi_match);
1955 1958
1956const struct dev_pm_ops applespi_pm_ops = { 1959static const struct dev_pm_ops applespi_pm_ops = {
1957 SET_SYSTEM_SLEEP_PM_OPS(applespi_suspend, applespi_resume) 1960 SET_SYSTEM_SLEEP_PM_OPS(applespi_suspend, applespi_resume)
1958 .poweroff_late = applespi_poweroff_late, 1961 .poweroff_late = applespi_poweroff_late,
1959}; 1962};
diff --git a/drivers/input/mouse/elantech.c b/drivers/input/mouse/elantech.c
index 2d8434b7b623..04fe43440a3c 100644
--- a/drivers/input/mouse/elantech.c
+++ b/drivers/input/mouse/elantech.c
@@ -1827,6 +1827,31 @@ static int elantech_create_smbus(struct psmouse *psmouse,
1827 leave_breadcrumbs); 1827 leave_breadcrumbs);
1828} 1828}
1829 1829
1830static bool elantech_use_host_notify(struct psmouse *psmouse,
1831 struct elantech_device_info *info)
1832{
1833 if (ETP_NEW_IC_SMBUS_HOST_NOTIFY(info->fw_version))
1834 return true;
1835
1836 switch (info->bus) {
1837 case ETP_BUS_PS2_ONLY:
1838 /* expected case */
1839 break;
1840 case ETP_BUS_SMB_HST_NTFY_ONLY:
1841 case ETP_BUS_PS2_SMB_HST_NTFY:
1842 /* SMbus implementation is stable since 2018 */
1843 if (dmi_get_bios_year() >= 2018)
1844 return true;
1845 /* fall through */
1846 default:
1847 psmouse_dbg(psmouse,
1848 "Ignoring SMBus bus provider %d\n", info->bus);
1849 break;
1850 }
1851
1852 return false;
1853}
1854
1830/** 1855/**
1831 * elantech_setup_smbus - called once the PS/2 devices are enumerated 1856 * elantech_setup_smbus - called once the PS/2 devices are enumerated
1832 * and decides to instantiate a SMBus InterTouch device. 1857 * and decides to instantiate a SMBus InterTouch device.
@@ -1846,7 +1871,7 @@ static int elantech_setup_smbus(struct psmouse *psmouse,
1846 * i2c_blacklist_pnp_ids. 1871 * i2c_blacklist_pnp_ids.
1847 * Old ICs are up to the user to decide. 1872 * Old ICs are up to the user to decide.
1848 */ 1873 */
1849 if (!ETP_NEW_IC_SMBUS_HOST_NOTIFY(info->fw_version) || 1874 if (!elantech_use_host_notify(psmouse, info) ||
1850 psmouse_matches_pnp_id(psmouse, i2c_blacklist_pnp_ids)) 1875 psmouse_matches_pnp_id(psmouse, i2c_blacklist_pnp_ids))
1851 return -ENXIO; 1876 return -ENXIO;
1852 } 1877 }
@@ -1866,34 +1891,6 @@ static int elantech_setup_smbus(struct psmouse *psmouse,
1866 return 0; 1891 return 0;
1867} 1892}
1868 1893
1869static bool elantech_use_host_notify(struct psmouse *psmouse,
1870 struct elantech_device_info *info)
1871{
1872 if (ETP_NEW_IC_SMBUS_HOST_NOTIFY(info->fw_version))
1873 return true;
1874
1875 switch (info->bus) {
1876 case ETP_BUS_PS2_ONLY:
1877 /* expected case */
1878 break;
1879 case ETP_BUS_SMB_ALERT_ONLY:
1880 /* fall-through */
1881 case ETP_BUS_PS2_SMB_ALERT:
1882 psmouse_dbg(psmouse, "Ignoring SMBus provider through alert protocol.\n");
1883 break;
1884 case ETP_BUS_SMB_HST_NTFY_ONLY:
1885 /* fall-through */
1886 case ETP_BUS_PS2_SMB_HST_NTFY:
1887 return true;
1888 default:
1889 psmouse_dbg(psmouse,
1890 "Ignoring SMBus bus provider %d.\n",
1891 info->bus);
1892 }
1893
1894 return false;
1895}
1896
1897int elantech_init_smbus(struct psmouse *psmouse) 1894int elantech_init_smbus(struct psmouse *psmouse)
1898{ 1895{
1899 struct elantech_device_info info; 1896 struct elantech_device_info info;
diff --git a/drivers/input/mouse/synaptics.c b/drivers/input/mouse/synaptics.c
index b1956ed4c0dd..46bbe99d6511 100644
--- a/drivers/input/mouse/synaptics.c
+++ b/drivers/input/mouse/synaptics.c
@@ -182,6 +182,7 @@ static const char * const smbus_pnp_ids[] = {
182 "LEN2055", /* E580 */ 182 "LEN2055", /* E580 */
183 "SYN3052", /* HP EliteBook 840 G4 */ 183 "SYN3052", /* HP EliteBook 840 G4 */
184 "SYN3221", /* HP 15-ay000 */ 184 "SYN3221", /* HP 15-ay000 */
185 "SYN323d", /* HP Spectre X360 13-w013dx */
185 NULL 186 NULL
186}; 187};
187 188
diff --git a/drivers/input/serio/hyperv-keyboard.c b/drivers/input/serio/hyperv-keyboard.c
index 88ae7c2ac3c8..e486a8a74c40 100644
--- a/drivers/input/serio/hyperv-keyboard.c
+++ b/drivers/input/serio/hyperv-keyboard.c
@@ -237,40 +237,17 @@ static void hv_kbd_handle_received_packet(struct hv_device *hv_dev,
237 237
238static void hv_kbd_on_channel_callback(void *context) 238static void hv_kbd_on_channel_callback(void *context)
239{ 239{
240 struct vmpacket_descriptor *desc;
240 struct hv_device *hv_dev = context; 241 struct hv_device *hv_dev = context;
241 void *buffer;
242 int bufferlen = 0x100; /* Start with sensible size */
243 u32 bytes_recvd; 242 u32 bytes_recvd;
244 u64 req_id; 243 u64 req_id;
245 int error;
246 244
247 buffer = kmalloc(bufferlen, GFP_ATOMIC); 245 foreach_vmbus_pkt(desc, hv_dev->channel) {
248 if (!buffer) 246 bytes_recvd = desc->len8 * 8;
249 return; 247 req_id = desc->trans_id;
250
251 while (1) {
252 error = vmbus_recvpacket_raw(hv_dev->channel, buffer, bufferlen,
253 &bytes_recvd, &req_id);
254 switch (error) {
255 case 0:
256 if (bytes_recvd == 0) {
257 kfree(buffer);
258 return;
259 }
260
261 hv_kbd_handle_received_packet(hv_dev, buffer,
262 bytes_recvd, req_id);
263 break;
264 248
265 case -ENOBUFS: 249 hv_kbd_handle_received_packet(hv_dev, desc, bytes_recvd,
266 kfree(buffer); 250 req_id);
267 /* Handle large packet */
268 bufferlen = bytes_recvd;
269 buffer = kmalloc(bytes_recvd, GFP_ATOMIC);
270 if (!buffer)
271 return;
272 break;
273 }
274 } 251 }
275} 252}
276 253
diff --git a/drivers/input/tablet/kbtab.c b/drivers/input/tablet/kbtab.c
index 04b85571f41e..aa577898e952 100644
--- a/drivers/input/tablet/kbtab.c
+++ b/drivers/input/tablet/kbtab.c
@@ -117,6 +117,10 @@ static int kbtab_probe(struct usb_interface *intf, const struct usb_device_id *i
117 if (intf->cur_altsetting->desc.bNumEndpoints < 1) 117 if (intf->cur_altsetting->desc.bNumEndpoints < 1)
118 return -ENODEV; 118 return -ENODEV;
119 119
120 endpoint = &intf->cur_altsetting->endpoint[0].desc;
121 if (!usb_endpoint_is_int_in(endpoint))
122 return -ENODEV;
123
120 kbtab = kzalloc(sizeof(struct kbtab), GFP_KERNEL); 124 kbtab = kzalloc(sizeof(struct kbtab), GFP_KERNEL);
121 input_dev = input_allocate_device(); 125 input_dev = input_allocate_device();
122 if (!kbtab || !input_dev) 126 if (!kbtab || !input_dev)
@@ -155,8 +159,6 @@ static int kbtab_probe(struct usb_interface *intf, const struct usb_device_id *i
155 input_set_abs_params(input_dev, ABS_Y, 0, 0x1750, 4, 0); 159 input_set_abs_params(input_dev, ABS_Y, 0, 0x1750, 4, 0);
156 input_set_abs_params(input_dev, ABS_PRESSURE, 0, 0xff, 0, 0); 160 input_set_abs_params(input_dev, ABS_PRESSURE, 0, 0xff, 0, 0);
157 161
158 endpoint = &intf->cur_altsetting->endpoint[0].desc;
159
160 usb_fill_int_urb(kbtab->irq, dev, 162 usb_fill_int_urb(kbtab->irq, dev,
161 usb_rcvintpipe(dev, endpoint->bEndpointAddress), 163 usb_rcvintpipe(dev, endpoint->bEndpointAddress),
162 kbtab->data, 8, 164 kbtab->data, 8,
diff --git a/drivers/input/touchscreen/usbtouchscreen.c b/drivers/input/touchscreen/usbtouchscreen.c
index a2cec6cacf57..16d70201de4a 100644
--- a/drivers/input/touchscreen/usbtouchscreen.c
+++ b/drivers/input/touchscreen/usbtouchscreen.c
@@ -1659,6 +1659,8 @@ static int usbtouch_probe(struct usb_interface *intf,
1659 if (!usbtouch || !input_dev) 1659 if (!usbtouch || !input_dev)
1660 goto out_free; 1660 goto out_free;
1661 1661
1662 mutex_init(&usbtouch->pm_mutex);
1663
1662 type = &usbtouch_dev_info[id->driver_info]; 1664 type = &usbtouch_dev_info[id->driver_info];
1663 usbtouch->type = type; 1665 usbtouch->type = type;
1664 if (!type->process_pkt) 1666 if (!type->process_pkt)
diff --git a/drivers/iommu/amd_iommu_init.c b/drivers/iommu/amd_iommu_init.c
index eb104c719629..4413aa67000e 100644
--- a/drivers/iommu/amd_iommu_init.c
+++ b/drivers/iommu/amd_iommu_init.c
@@ -23,6 +23,8 @@
23#include <linux/mem_encrypt.h> 23#include <linux/mem_encrypt.h>
24#include <asm/pci-direct.h> 24#include <asm/pci-direct.h>
25#include <asm/iommu.h> 25#include <asm/iommu.h>
26#include <asm/apic.h>
27#include <asm/msidef.h>
26#include <asm/gart.h> 28#include <asm/gart.h>
27#include <asm/x86_init.h> 29#include <asm/x86_init.h>
28#include <asm/iommu_table.h> 30#include <asm/iommu_table.h>
@@ -1920,6 +1922,90 @@ static int iommu_setup_msi(struct amd_iommu *iommu)
1920 return 0; 1922 return 0;
1921} 1923}
1922 1924
1925#define XT_INT_DEST_MODE(x) (((x) & 0x1ULL) << 2)
1926#define XT_INT_DEST_LO(x) (((x) & 0xFFFFFFULL) << 8)
1927#define XT_INT_VEC(x) (((x) & 0xFFULL) << 32)
1928#define XT_INT_DEST_HI(x) ((((x) >> 24) & 0xFFULL) << 56)
1929
1930/**
1931 * Setup the IntCapXT registers with interrupt routing information
1932 * based on the PCI MSI capability block registers, accessed via
1933 * MMIO MSI address low/hi and MSI data registers.
1934 */
1935static void iommu_update_intcapxt(struct amd_iommu *iommu)
1936{
1937 u64 val;
1938 u32 addr_lo = readl(iommu->mmio_base + MMIO_MSI_ADDR_LO_OFFSET);
1939 u32 addr_hi = readl(iommu->mmio_base + MMIO_MSI_ADDR_HI_OFFSET);
1940 u32 data = readl(iommu->mmio_base + MMIO_MSI_DATA_OFFSET);
1941 bool dm = (addr_lo >> MSI_ADDR_DEST_MODE_SHIFT) & 0x1;
1942 u32 dest = ((addr_lo >> MSI_ADDR_DEST_ID_SHIFT) & 0xFF);
1943
1944 if (x2apic_enabled())
1945 dest |= MSI_ADDR_EXT_DEST_ID(addr_hi);
1946
1947 val = XT_INT_VEC(data & 0xFF) |
1948 XT_INT_DEST_MODE(dm) |
1949 XT_INT_DEST_LO(dest) |
1950 XT_INT_DEST_HI(dest);
1951
1952 /**
1953 * Current IOMMU implemtation uses the same IRQ for all
1954 * 3 IOMMU interrupts.
1955 */
1956 writeq(val, iommu->mmio_base + MMIO_INTCAPXT_EVT_OFFSET);
1957 writeq(val, iommu->mmio_base + MMIO_INTCAPXT_PPR_OFFSET);
1958 writeq(val, iommu->mmio_base + MMIO_INTCAPXT_GALOG_OFFSET);
1959}
1960
1961static void _irq_notifier_notify(struct irq_affinity_notify *notify,
1962 const cpumask_t *mask)
1963{
1964 struct amd_iommu *iommu;
1965
1966 for_each_iommu(iommu) {
1967 if (iommu->dev->irq == notify->irq) {
1968 iommu_update_intcapxt(iommu);
1969 break;
1970 }
1971 }
1972}
1973
1974static void _irq_notifier_release(struct kref *ref)
1975{
1976}
1977
1978static int iommu_init_intcapxt(struct amd_iommu *iommu)
1979{
1980 int ret;
1981 struct irq_affinity_notify *notify = &iommu->intcapxt_notify;
1982
1983 /**
1984 * IntCapXT requires XTSup=1, which can be inferred
1985 * amd_iommu_xt_mode.
1986 */
1987 if (amd_iommu_xt_mode != IRQ_REMAP_X2APIC_MODE)
1988 return 0;
1989
1990 /**
1991 * Also, we need to setup notifier to update the IntCapXT registers
1992 * whenever the irq affinity is changed from user-space.
1993 */
1994 notify->irq = iommu->dev->irq;
1995 notify->notify = _irq_notifier_notify,
1996 notify->release = _irq_notifier_release,
1997 ret = irq_set_affinity_notifier(iommu->dev->irq, notify);
1998 if (ret) {
1999 pr_err("Failed to register irq affinity notifier (devid=%#x, irq %d)\n",
2000 iommu->devid, iommu->dev->irq);
2001 return ret;
2002 }
2003
2004 iommu_update_intcapxt(iommu);
2005 iommu_feature_enable(iommu, CONTROL_INTCAPXT_EN);
2006 return ret;
2007}
2008
1923static int iommu_init_msi(struct amd_iommu *iommu) 2009static int iommu_init_msi(struct amd_iommu *iommu)
1924{ 2010{
1925 int ret; 2011 int ret;
@@ -1936,6 +2022,10 @@ static int iommu_init_msi(struct amd_iommu *iommu)
1936 return ret; 2022 return ret;
1937 2023
1938enable_faults: 2024enable_faults:
2025 ret = iommu_init_intcapxt(iommu);
2026 if (ret)
2027 return ret;
2028
1939 iommu_feature_enable(iommu, CONTROL_EVT_INT_EN); 2029 iommu_feature_enable(iommu, CONTROL_EVT_INT_EN);
1940 2030
1941 if (iommu->ppr_log != NULL) 2031 if (iommu->ppr_log != NULL)
diff --git a/drivers/iommu/amd_iommu_types.h b/drivers/iommu/amd_iommu_types.h
index 52c35d557fad..64edd5a9694c 100644
--- a/drivers/iommu/amd_iommu_types.h
+++ b/drivers/iommu/amd_iommu_types.h
@@ -60,6 +60,12 @@
60#define MMIO_PPR_LOG_OFFSET 0x0038 60#define MMIO_PPR_LOG_OFFSET 0x0038
61#define MMIO_GA_LOG_BASE_OFFSET 0x00e0 61#define MMIO_GA_LOG_BASE_OFFSET 0x00e0
62#define MMIO_GA_LOG_TAIL_OFFSET 0x00e8 62#define MMIO_GA_LOG_TAIL_OFFSET 0x00e8
63#define MMIO_MSI_ADDR_LO_OFFSET 0x015C
64#define MMIO_MSI_ADDR_HI_OFFSET 0x0160
65#define MMIO_MSI_DATA_OFFSET 0x0164
66#define MMIO_INTCAPXT_EVT_OFFSET 0x0170
67#define MMIO_INTCAPXT_PPR_OFFSET 0x0178
68#define MMIO_INTCAPXT_GALOG_OFFSET 0x0180
63#define MMIO_CMD_HEAD_OFFSET 0x2000 69#define MMIO_CMD_HEAD_OFFSET 0x2000
64#define MMIO_CMD_TAIL_OFFSET 0x2008 70#define MMIO_CMD_TAIL_OFFSET 0x2008
65#define MMIO_EVT_HEAD_OFFSET 0x2010 71#define MMIO_EVT_HEAD_OFFSET 0x2010
@@ -150,6 +156,7 @@
150#define CONTROL_GALOG_EN 0x1CULL 156#define CONTROL_GALOG_EN 0x1CULL
151#define CONTROL_GAINT_EN 0x1DULL 157#define CONTROL_GAINT_EN 0x1DULL
152#define CONTROL_XT_EN 0x32ULL 158#define CONTROL_XT_EN 0x32ULL
159#define CONTROL_INTCAPXT_EN 0x33ULL
153 160
154#define CTRL_INV_TO_MASK (7 << CONTROL_INV_TIMEOUT) 161#define CTRL_INV_TO_MASK (7 << CONTROL_INV_TIMEOUT)
155#define CTRL_INV_TO_NONE 0 162#define CTRL_INV_TO_NONE 0
@@ -592,6 +599,8 @@ struct amd_iommu {
592 /* DebugFS Info */ 599 /* DebugFS Info */
593 struct dentry *debugfs; 600 struct dentry *debugfs;
594#endif 601#endif
602 /* IRQ notifier for IntCapXT interrupt */
603 struct irq_affinity_notify intcapxt_notify;
595}; 604};
596 605
597static inline struct amd_iommu *dev_to_amd_iommu(struct device *dev) 606static inline struct amd_iommu *dev_to_amd_iommu(struct device *dev)
diff --git a/drivers/iommu/arm-smmu-v3.c b/drivers/iommu/arm-smmu-v3.c
index a9a9fabd3968..c5c93e48b4db 100644
--- a/drivers/iommu/arm-smmu-v3.c
+++ b/drivers/iommu/arm-smmu-v3.c
@@ -1186,8 +1186,8 @@ static void arm_smmu_write_strtab_ent(struct arm_smmu_master *master, u32 sid,
1186 ste_live = true; 1186 ste_live = true;
1187 break; 1187 break;
1188 case STRTAB_STE_0_CFG_ABORT: 1188 case STRTAB_STE_0_CFG_ABORT:
1189 if (disable_bypass) 1189 BUG_ON(!disable_bypass);
1190 break; 1190 break;
1191 default: 1191 default:
1192 BUG(); /* STE corruption */ 1192 BUG(); /* STE corruption */
1193 } 1193 }
diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c
index a7f9c3edbcb2..f68a62c3c32b 100644
--- a/drivers/iommu/dma-iommu.c
+++ b/drivers/iommu/dma-iommu.c
@@ -459,13 +459,11 @@ static dma_addr_t __iommu_dma_map(struct device *dev, phys_addr_t phys,
459{ 459{
460 struct iommu_domain *domain = iommu_get_dma_domain(dev); 460 struct iommu_domain *domain = iommu_get_dma_domain(dev);
461 struct iommu_dma_cookie *cookie = domain->iova_cookie; 461 struct iommu_dma_cookie *cookie = domain->iova_cookie;
462 size_t iova_off = 0; 462 struct iova_domain *iovad = &cookie->iovad;
463 size_t iova_off = iova_offset(iovad, phys);
463 dma_addr_t iova; 464 dma_addr_t iova;
464 465
465 if (cookie->type == IOMMU_DMA_IOVA_COOKIE) { 466 size = iova_align(iovad, size + iova_off);
466 iova_off = iova_offset(&cookie->iovad, phys);
467 size = iova_align(&cookie->iovad, size + iova_off);
468 }
469 467
470 iova = iommu_dma_alloc_iova(domain, size, dma_get_mask(dev), dev); 468 iova = iommu_dma_alloc_iova(domain, size, dma_get_mask(dev), dev);
471 if (!iova) 469 if (!iova)
@@ -574,7 +572,7 @@ static void *iommu_dma_alloc_remap(struct device *dev, size_t size,
574 struct iova_domain *iovad = &cookie->iovad; 572 struct iova_domain *iovad = &cookie->iovad;
575 bool coherent = dev_is_dma_coherent(dev); 573 bool coherent = dev_is_dma_coherent(dev);
576 int ioprot = dma_info_to_prot(DMA_BIDIRECTIONAL, coherent, attrs); 574 int ioprot = dma_info_to_prot(DMA_BIDIRECTIONAL, coherent, attrs);
577 pgprot_t prot = arch_dma_mmap_pgprot(dev, PAGE_KERNEL, attrs); 575 pgprot_t prot = dma_pgprot(dev, PAGE_KERNEL, attrs);
578 unsigned int count, min_size, alloc_sizes = domain->pgsize_bitmap; 576 unsigned int count, min_size, alloc_sizes = domain->pgsize_bitmap;
579 struct page **pages; 577 struct page **pages;
580 struct sg_table sgt; 578 struct sg_table sgt;
@@ -764,7 +762,7 @@ static int __finalise_sg(struct device *dev, struct scatterlist *sg, int nents,
764 * - and wouldn't make the resulting output segment too long 762 * - and wouldn't make the resulting output segment too long
765 */ 763 */
766 if (cur_len && !s_iova_off && (dma_addr & seg_mask) && 764 if (cur_len && !s_iova_off && (dma_addr & seg_mask) &&
767 (cur_len + s_length <= max_len)) { 765 (max_len - cur_len >= s_length)) {
768 /* ...then concatenate it with the previous one */ 766 /* ...then concatenate it with the previous one */
769 cur_len += s_length; 767 cur_len += s_length;
770 } else { 768 } else {
@@ -967,15 +965,18 @@ static void *iommu_dma_alloc_pages(struct device *dev, size_t size,
967{ 965{
968 bool coherent = dev_is_dma_coherent(dev); 966 bool coherent = dev_is_dma_coherent(dev);
969 size_t alloc_size = PAGE_ALIGN(size); 967 size_t alloc_size = PAGE_ALIGN(size);
968 int node = dev_to_node(dev);
970 struct page *page = NULL; 969 struct page *page = NULL;
971 void *cpu_addr; 970 void *cpu_addr;
972 971
973 page = dma_alloc_contiguous(dev, alloc_size, gfp); 972 page = dma_alloc_contiguous(dev, alloc_size, gfp);
974 if (!page) 973 if (!page)
974 page = alloc_pages_node(node, gfp, get_order(alloc_size));
975 if (!page)
975 return NULL; 976 return NULL;
976 977
977 if (IS_ENABLED(CONFIG_DMA_REMAP) && (!coherent || PageHighMem(page))) { 978 if (IS_ENABLED(CONFIG_DMA_REMAP) && (!coherent || PageHighMem(page))) {
978 pgprot_t prot = arch_dma_mmap_pgprot(dev, PAGE_KERNEL, attrs); 979 pgprot_t prot = dma_pgprot(dev, PAGE_KERNEL, attrs);
979 980
980 cpu_addr = dma_common_contiguous_remap(page, alloc_size, 981 cpu_addr = dma_common_contiguous_remap(page, alloc_size,
981 VM_USERMAP, prot, __builtin_return_address(0)); 982 VM_USERMAP, prot, __builtin_return_address(0));
@@ -1035,7 +1036,7 @@ static int iommu_dma_mmap(struct device *dev, struct vm_area_struct *vma,
1035 unsigned long pfn, off = vma->vm_pgoff; 1036 unsigned long pfn, off = vma->vm_pgoff;
1036 int ret; 1037 int ret;
1037 1038
1038 vma->vm_page_prot = arch_dma_mmap_pgprot(dev, vma->vm_page_prot, attrs); 1039 vma->vm_page_prot = dma_pgprot(dev, vma->vm_page_prot, attrs);
1039 1040
1040 if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret)) 1041 if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret))
1041 return ret; 1042 return ret;
@@ -1147,16 +1148,21 @@ static struct iommu_dma_msi_page *iommu_dma_get_msi_page(struct device *dev,
1147 if (!msi_page) 1148 if (!msi_page)
1148 return NULL; 1149 return NULL;
1149 1150
1150 iova = __iommu_dma_map(dev, msi_addr, size, prot); 1151 iova = iommu_dma_alloc_iova(domain, size, dma_get_mask(dev), dev);
1151 if (iova == DMA_MAPPING_ERROR) 1152 if (!iova)
1152 goto out_free_page; 1153 goto out_free_page;
1153 1154
1155 if (iommu_map(domain, iova, msi_addr, size, prot))
1156 goto out_free_iova;
1157
1154 INIT_LIST_HEAD(&msi_page->list); 1158 INIT_LIST_HEAD(&msi_page->list);
1155 msi_page->phys = msi_addr; 1159 msi_page->phys = msi_addr;
1156 msi_page->iova = iova; 1160 msi_page->iova = iova;
1157 list_add(&msi_page->list, &cookie->msi_page_list); 1161 list_add(&msi_page->list, &cookie->msi_page_list);
1158 return msi_page; 1162 return msi_page;
1159 1163
1164out_free_iova:
1165 iommu_dma_free_iova(cookie, iova, size);
1160out_free_page: 1166out_free_page:
1161 kfree(msi_page); 1167 kfree(msi_page);
1162 return NULL; 1168 return NULL;
diff --git a/drivers/iommu/intel-iommu-debugfs.c b/drivers/iommu/intel-iommu-debugfs.c
index 73a552914455..471f05d452e0 100644
--- a/drivers/iommu/intel-iommu-debugfs.c
+++ b/drivers/iommu/intel-iommu-debugfs.c
@@ -162,9 +162,9 @@ static inline void print_tbl_walk(struct seq_file *m)
162 (u64)0, (u64)0, (u64)0); 162 (u64)0, (u64)0, (u64)0);
163 else 163 else
164 seq_printf(m, "%-6d\t0x%016llx:0x%016llx:0x%016llx\n", 164 seq_printf(m, "%-6d\t0x%016llx:0x%016llx:0x%016llx\n",
165 tbl_wlk->pasid, tbl_wlk->pasid_tbl_entry->val[0], 165 tbl_wlk->pasid, tbl_wlk->pasid_tbl_entry->val[2],
166 tbl_wlk->pasid_tbl_entry->val[1], 166 tbl_wlk->pasid_tbl_entry->val[1],
167 tbl_wlk->pasid_tbl_entry->val[2]); 167 tbl_wlk->pasid_tbl_entry->val[0]);
168} 168}
169 169
170static void pasid_tbl_walk(struct seq_file *m, struct pasid_entry *tbl_entry, 170static void pasid_tbl_walk(struct seq_file *m, struct pasid_entry *tbl_entry,
@@ -235,7 +235,7 @@ static void ctx_tbl_walk(struct seq_file *m, struct intel_iommu *iommu, u16 bus)
235 tbl_wlk.ctx_entry = context; 235 tbl_wlk.ctx_entry = context;
236 m->private = &tbl_wlk; 236 m->private = &tbl_wlk;
237 237
238 if (pasid_supported(iommu) && is_pasid_enabled(context)) { 238 if (dmar_readq(iommu->reg + DMAR_RTADDR_REG) & DMA_RTADDR_SMT) {
239 pasid_dir_ptr = context->lo & VTD_PAGE_MASK; 239 pasid_dir_ptr = context->lo & VTD_PAGE_MASK;
240 pasid_dir_size = get_pasid_dir_size(context); 240 pasid_dir_size = get_pasid_dir_size(context);
241 pasid_dir_walk(m, pasid_dir_ptr, pasid_dir_size); 241 pasid_dir_walk(m, pasid_dir_ptr, pasid_dir_size);
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index ac4172c02244..12d094d08c0a 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -339,8 +339,6 @@ static void domain_exit(struct dmar_domain *domain);
339static void domain_remove_dev_info(struct dmar_domain *domain); 339static void domain_remove_dev_info(struct dmar_domain *domain);
340static void dmar_remove_one_dev_info(struct device *dev); 340static void dmar_remove_one_dev_info(struct device *dev);
341static void __dmar_remove_one_dev_info(struct device_domain_info *info); 341static void __dmar_remove_one_dev_info(struct device_domain_info *info);
342static void domain_context_clear(struct intel_iommu *iommu,
343 struct device *dev);
344static int domain_detach_iommu(struct dmar_domain *domain, 342static int domain_detach_iommu(struct dmar_domain *domain,
345 struct intel_iommu *iommu); 343 struct intel_iommu *iommu);
346static bool device_is_rmrr_locked(struct device *dev); 344static bool device_is_rmrr_locked(struct device *dev);
@@ -1833,9 +1831,65 @@ static inline int guestwidth_to_adjustwidth(int gaw)
1833 return agaw; 1831 return agaw;
1834} 1832}
1835 1833
1834static int domain_init(struct dmar_domain *domain, struct intel_iommu *iommu,
1835 int guest_width)
1836{
1837 int adjust_width, agaw;
1838 unsigned long sagaw;
1839 int err;
1840
1841 init_iova_domain(&domain->iovad, VTD_PAGE_SIZE, IOVA_START_PFN);
1842
1843 err = init_iova_flush_queue(&domain->iovad,
1844 iommu_flush_iova, iova_entry_free);
1845 if (err)
1846 return err;
1847
1848 domain_reserve_special_ranges(domain);
1849
1850 /* calculate AGAW */
1851 if (guest_width > cap_mgaw(iommu->cap))
1852 guest_width = cap_mgaw(iommu->cap);
1853 domain->gaw = guest_width;
1854 adjust_width = guestwidth_to_adjustwidth(guest_width);
1855 agaw = width_to_agaw(adjust_width);
1856 sagaw = cap_sagaw(iommu->cap);
1857 if (!test_bit(agaw, &sagaw)) {
1858 /* hardware doesn't support it, choose a bigger one */
1859 pr_debug("Hardware doesn't support agaw %d\n", agaw);
1860 agaw = find_next_bit(&sagaw, 5, agaw);
1861 if (agaw >= 5)
1862 return -ENODEV;
1863 }
1864 domain->agaw = agaw;
1865
1866 if (ecap_coherent(iommu->ecap))
1867 domain->iommu_coherency = 1;
1868 else
1869 domain->iommu_coherency = 0;
1870
1871 if (ecap_sc_support(iommu->ecap))
1872 domain->iommu_snooping = 1;
1873 else
1874 domain->iommu_snooping = 0;
1875
1876 if (intel_iommu_superpage)
1877 domain->iommu_superpage = fls(cap_super_page_val(iommu->cap));
1878 else
1879 domain->iommu_superpage = 0;
1880
1881 domain->nid = iommu->node;
1882
1883 /* always allocate the top pgd */
1884 domain->pgd = (struct dma_pte *)alloc_pgtable_page(domain->nid);
1885 if (!domain->pgd)
1886 return -ENOMEM;
1887 __iommu_flush_cache(iommu, domain->pgd, PAGE_SIZE);
1888 return 0;
1889}
1890
1836static void domain_exit(struct dmar_domain *domain) 1891static void domain_exit(struct dmar_domain *domain)
1837{ 1892{
1838 struct page *freelist;
1839 1893
1840 /* Remove associated devices and clear attached or cached domains */ 1894 /* Remove associated devices and clear attached or cached domains */
1841 domain_remove_dev_info(domain); 1895 domain_remove_dev_info(domain);
@@ -1843,9 +1897,12 @@ static void domain_exit(struct dmar_domain *domain)
1843 /* destroy iovas */ 1897 /* destroy iovas */
1844 put_iova_domain(&domain->iovad); 1898 put_iova_domain(&domain->iovad);
1845 1899
1846 freelist = domain_unmap(domain, 0, DOMAIN_MAX_PFN(domain->gaw)); 1900 if (domain->pgd) {
1901 struct page *freelist;
1847 1902
1848 dma_free_pagelist(freelist); 1903 freelist = domain_unmap(domain, 0, DOMAIN_MAX_PFN(domain->gaw));
1904 dma_free_pagelist(freelist);
1905 }
1849 1906
1850 free_domain_mem(domain); 1907 free_domain_mem(domain);
1851} 1908}
@@ -2048,26 +2105,9 @@ out_unlock:
2048 return ret; 2105 return ret;
2049} 2106}
2050 2107
2051struct domain_context_mapping_data {
2052 struct dmar_domain *domain;
2053 struct intel_iommu *iommu;
2054 struct pasid_table *table;
2055};
2056
2057static int domain_context_mapping_cb(struct pci_dev *pdev,
2058 u16 alias, void *opaque)
2059{
2060 struct domain_context_mapping_data *data = opaque;
2061
2062 return domain_context_mapping_one(data->domain, data->iommu,
2063 data->table, PCI_BUS_NUM(alias),
2064 alias & 0xff);
2065}
2066
2067static int 2108static int
2068domain_context_mapping(struct dmar_domain *domain, struct device *dev) 2109domain_context_mapping(struct dmar_domain *domain, struct device *dev)
2069{ 2110{
2070 struct domain_context_mapping_data data;
2071 struct pasid_table *table; 2111 struct pasid_table *table;
2072 struct intel_iommu *iommu; 2112 struct intel_iommu *iommu;
2073 u8 bus, devfn; 2113 u8 bus, devfn;
@@ -2077,17 +2117,7 @@ domain_context_mapping(struct dmar_domain *domain, struct device *dev)
2077 return -ENODEV; 2117 return -ENODEV;
2078 2118
2079 table = intel_pasid_get_table(dev); 2119 table = intel_pasid_get_table(dev);
2080 2120 return domain_context_mapping_one(domain, iommu, table, bus, devfn);
2081 if (!dev_is_pci(dev))
2082 return domain_context_mapping_one(domain, iommu, table,
2083 bus, devfn);
2084
2085 data.domain = domain;
2086 data.iommu = iommu;
2087 data.table = table;
2088
2089 return pci_for_each_dma_alias(to_pci_dev(dev),
2090 &domain_context_mapping_cb, &data);
2091} 2121}
2092 2122
2093static int domain_context_mapped_cb(struct pci_dev *pdev, 2123static int domain_context_mapped_cb(struct pci_dev *pdev,
@@ -2513,31 +2543,6 @@ static int get_last_alias(struct pci_dev *pdev, u16 alias, void *opaque)
2513 return 0; 2543 return 0;
2514} 2544}
2515 2545
2516static int domain_init(struct dmar_domain *domain, int guest_width)
2517{
2518 int adjust_width;
2519
2520 init_iova_domain(&domain->iovad, VTD_PAGE_SIZE, IOVA_START_PFN);
2521 domain_reserve_special_ranges(domain);
2522
2523 /* calculate AGAW */
2524 domain->gaw = guest_width;
2525 adjust_width = guestwidth_to_adjustwidth(guest_width);
2526 domain->agaw = width_to_agaw(adjust_width);
2527
2528 domain->iommu_coherency = 0;
2529 domain->iommu_snooping = 0;
2530 domain->iommu_superpage = 0;
2531 domain->max_addr = 0;
2532
2533 /* always allocate the top pgd */
2534 domain->pgd = (struct dma_pte *)alloc_pgtable_page(domain->nid);
2535 if (!domain->pgd)
2536 return -ENOMEM;
2537 domain_flush_cache(domain, domain->pgd, PAGE_SIZE);
2538 return 0;
2539}
2540
2541static struct dmar_domain *find_or_alloc_domain(struct device *dev, int gaw) 2546static struct dmar_domain *find_or_alloc_domain(struct device *dev, int gaw)
2542{ 2547{
2543 struct device_domain_info *info; 2548 struct device_domain_info *info;
@@ -2575,19 +2580,11 @@ static struct dmar_domain *find_or_alloc_domain(struct device *dev, int gaw)
2575 domain = alloc_domain(0); 2580 domain = alloc_domain(0);
2576 if (!domain) 2581 if (!domain)
2577 return NULL; 2582 return NULL;
2578 2583 if (domain_init(domain, iommu, gaw)) {
2579 if (domain_init(domain, gaw)) {
2580 domain_exit(domain); 2584 domain_exit(domain);
2581 return NULL; 2585 return NULL;
2582 } 2586 }
2583 2587
2584 if (init_iova_flush_queue(&domain->iovad,
2585 iommu_flush_iova,
2586 iova_entry_free)) {
2587 pr_warn("iova flush queue initialization failed\n");
2588 intel_iommu_strict = 1;
2589 }
2590
2591out: 2588out:
2592 return domain; 2589 return domain;
2593} 2590}
@@ -2692,6 +2689,8 @@ static int domain_prepare_identity_map(struct device *dev,
2692 return iommu_domain_identity_map(domain, start, end); 2689 return iommu_domain_identity_map(domain, start, end);
2693} 2690}
2694 2691
2692static int md_domain_init(struct dmar_domain *domain, int guest_width);
2693
2695static int __init si_domain_init(int hw) 2694static int __init si_domain_init(int hw)
2696{ 2695{
2697 struct dmar_rmrr_unit *rmrr; 2696 struct dmar_rmrr_unit *rmrr;
@@ -2702,7 +2701,7 @@ static int __init si_domain_init(int hw)
2702 if (!si_domain) 2701 if (!si_domain)
2703 return -EFAULT; 2702 return -EFAULT;
2704 2703
2705 if (domain_init(si_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) { 2704 if (md_domain_init(si_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) {
2706 domain_exit(si_domain); 2705 domain_exit(si_domain);
2707 return -EFAULT; 2706 return -EFAULT;
2708 } 2707 }
@@ -3450,6 +3449,7 @@ static bool iommu_need_mapping(struct device *dev)
3450 dmar_domain = to_dmar_domain(domain); 3449 dmar_domain = to_dmar_domain(domain);
3451 dmar_domain->flags |= DOMAIN_FLAG_LOSE_CHILDREN; 3450 dmar_domain->flags |= DOMAIN_FLAG_LOSE_CHILDREN;
3452 } 3451 }
3452 dmar_remove_one_dev_info(dev);
3453 get_private_domain_for_dev(dev); 3453 get_private_domain_for_dev(dev);
3454 } 3454 }
3455 3455
@@ -3564,7 +3564,8 @@ static void intel_unmap(struct device *dev, dma_addr_t dev_addr, size_t size)
3564 3564
3565 freelist = domain_unmap(domain, start_pfn, last_pfn); 3565 freelist = domain_unmap(domain, start_pfn, last_pfn);
3566 3566
3567 if (intel_iommu_strict || (pdev && pdev->untrusted)) { 3567 if (intel_iommu_strict || (pdev && pdev->untrusted) ||
3568 !has_iova_flush_queue(&domain->iovad)) {
3568 iommu_flush_iotlb_psi(iommu, domain, start_pfn, 3569 iommu_flush_iotlb_psi(iommu, domain, start_pfn,
3569 nrpages, !freelist, 0); 3570 nrpages, !freelist, 0);
3570 /* free iova */ 3571 /* free iova */
@@ -4758,28 +4759,6 @@ out_free_dmar:
4758 return ret; 4759 return ret;
4759} 4760}
4760 4761
4761static int domain_context_clear_one_cb(struct pci_dev *pdev, u16 alias, void *opaque)
4762{
4763 struct intel_iommu *iommu = opaque;
4764
4765 domain_context_clear_one(iommu, PCI_BUS_NUM(alias), alias & 0xff);
4766 return 0;
4767}
4768
4769/*
4770 * NB - intel-iommu lacks any sort of reference counting for the users of
4771 * dependent devices. If multiple endpoints have intersecting dependent
4772 * devices, unbinding the driver from any one of them will possibly leave
4773 * the others unable to operate.
4774 */
4775static void domain_context_clear(struct intel_iommu *iommu, struct device *dev)
4776{
4777 if (!iommu || !dev || !dev_is_pci(dev))
4778 return;
4779
4780 pci_for_each_dma_alias(to_pci_dev(dev), &domain_context_clear_one_cb, iommu);
4781}
4782
4783static void __dmar_remove_one_dev_info(struct device_domain_info *info) 4762static void __dmar_remove_one_dev_info(struct device_domain_info *info)
4784{ 4763{
4785 struct dmar_domain *domain; 4764 struct dmar_domain *domain;
@@ -4800,7 +4779,7 @@ static void __dmar_remove_one_dev_info(struct device_domain_info *info)
4800 PASID_RID2PASID); 4779 PASID_RID2PASID);
4801 4780
4802 iommu_disable_dev_iotlb(info); 4781 iommu_disable_dev_iotlb(info);
4803 domain_context_clear(iommu, info->dev); 4782 domain_context_clear_one(iommu, info->bus, info->devfn);
4804 intel_pasid_free_table(info->dev); 4783 intel_pasid_free_table(info->dev);
4805 } 4784 }
4806 4785
@@ -4812,7 +4791,8 @@ static void __dmar_remove_one_dev_info(struct device_domain_info *info)
4812 4791
4813 /* free the private domain */ 4792 /* free the private domain */
4814 if (domain->flags & DOMAIN_FLAG_LOSE_CHILDREN && 4793 if (domain->flags & DOMAIN_FLAG_LOSE_CHILDREN &&
4815 !(domain->flags & DOMAIN_FLAG_STATIC_IDENTITY)) 4794 !(domain->flags & DOMAIN_FLAG_STATIC_IDENTITY) &&
4795 list_empty(&domain->devices))
4816 domain_exit(info->domain); 4796 domain_exit(info->domain);
4817 4797
4818 free_devinfo_mem(info); 4798 free_devinfo_mem(info);
@@ -4825,10 +4805,36 @@ static void dmar_remove_one_dev_info(struct device *dev)
4825 4805
4826 spin_lock_irqsave(&device_domain_lock, flags); 4806 spin_lock_irqsave(&device_domain_lock, flags);
4827 info = dev->archdata.iommu; 4807 info = dev->archdata.iommu;
4828 __dmar_remove_one_dev_info(info); 4808 if (info)
4809 __dmar_remove_one_dev_info(info);
4829 spin_unlock_irqrestore(&device_domain_lock, flags); 4810 spin_unlock_irqrestore(&device_domain_lock, flags);
4830} 4811}
4831 4812
4813static int md_domain_init(struct dmar_domain *domain, int guest_width)
4814{
4815 int adjust_width;
4816
4817 init_iova_domain(&domain->iovad, VTD_PAGE_SIZE, IOVA_START_PFN);
4818 domain_reserve_special_ranges(domain);
4819
4820 /* calculate AGAW */
4821 domain->gaw = guest_width;
4822 adjust_width = guestwidth_to_adjustwidth(guest_width);
4823 domain->agaw = width_to_agaw(adjust_width);
4824
4825 domain->iommu_coherency = 0;
4826 domain->iommu_snooping = 0;
4827 domain->iommu_superpage = 0;
4828 domain->max_addr = 0;
4829
4830 /* always allocate the top pgd */
4831 domain->pgd = (struct dma_pte *)alloc_pgtable_page(domain->nid);
4832 if (!domain->pgd)
4833 return -ENOMEM;
4834 domain_flush_cache(domain, domain->pgd, PAGE_SIZE);
4835 return 0;
4836}
4837
4832static struct iommu_domain *intel_iommu_domain_alloc(unsigned type) 4838static struct iommu_domain *intel_iommu_domain_alloc(unsigned type)
4833{ 4839{
4834 struct dmar_domain *dmar_domain; 4840 struct dmar_domain *dmar_domain;
@@ -4843,7 +4849,7 @@ static struct iommu_domain *intel_iommu_domain_alloc(unsigned type)
4843 pr_err("Can't allocate dmar_domain\n"); 4849 pr_err("Can't allocate dmar_domain\n");
4844 return NULL; 4850 return NULL;
4845 } 4851 }
4846 if (domain_init(dmar_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) { 4852 if (md_domain_init(dmar_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) {
4847 pr_err("Domain initialization failed\n"); 4853 pr_err("Domain initialization failed\n");
4848 domain_exit(dmar_domain); 4854 domain_exit(dmar_domain);
4849 return NULL; 4855 return NULL;
@@ -5278,6 +5284,7 @@ static int intel_iommu_add_device(struct device *dev)
5278 if (device_def_domain_type(dev) == IOMMU_DOMAIN_IDENTITY) { 5284 if (device_def_domain_type(dev) == IOMMU_DOMAIN_IDENTITY) {
5279 ret = iommu_request_dm_for_dev(dev); 5285 ret = iommu_request_dm_for_dev(dev);
5280 if (ret) { 5286 if (ret) {
5287 dmar_remove_one_dev_info(dev);
5281 dmar_domain->flags |= DOMAIN_FLAG_LOSE_CHILDREN; 5288 dmar_domain->flags |= DOMAIN_FLAG_LOSE_CHILDREN;
5282 domain_add_dev_info(si_domain, dev); 5289 domain_add_dev_info(si_domain, dev);
5283 dev_info(dev, 5290 dev_info(dev,
@@ -5288,6 +5295,7 @@ static int intel_iommu_add_device(struct device *dev)
5288 if (device_def_domain_type(dev) == IOMMU_DOMAIN_DMA) { 5295 if (device_def_domain_type(dev) == IOMMU_DOMAIN_DMA) {
5289 ret = iommu_request_dma_domain_for_dev(dev); 5296 ret = iommu_request_dma_domain_for_dev(dev);
5290 if (ret) { 5297 if (ret) {
5298 dmar_remove_one_dev_info(dev);
5291 dmar_domain->flags |= DOMAIN_FLAG_LOSE_CHILDREN; 5299 dmar_domain->flags |= DOMAIN_FLAG_LOSE_CHILDREN;
5292 if (!get_private_domain_for_dev(dev)) { 5300 if (!get_private_domain_for_dev(dev)) {
5293 dev_warn(dev, 5301 dev_warn(dev,
@@ -5313,6 +5321,8 @@ static void intel_iommu_remove_device(struct device *dev)
5313 if (!iommu) 5321 if (!iommu)
5314 return; 5322 return;
5315 5323
5324 dmar_remove_one_dev_info(dev);
5325
5316 iommu_group_remove_device(dev); 5326 iommu_group_remove_device(dev);
5317 5327
5318 iommu_device_unlink(&iommu->iommu, dev); 5328 iommu_device_unlink(&iommu->iommu, dev);
diff --git a/drivers/iommu/iova.c b/drivers/iommu/iova.c
index d499b2621239..3e1a8a675572 100644
--- a/drivers/iommu/iova.c
+++ b/drivers/iommu/iova.c
@@ -54,9 +54,14 @@ init_iova_domain(struct iova_domain *iovad, unsigned long granule,
54} 54}
55EXPORT_SYMBOL_GPL(init_iova_domain); 55EXPORT_SYMBOL_GPL(init_iova_domain);
56 56
57bool has_iova_flush_queue(struct iova_domain *iovad)
58{
59 return !!iovad->fq;
60}
61
57static void free_iova_flush_queue(struct iova_domain *iovad) 62static void free_iova_flush_queue(struct iova_domain *iovad)
58{ 63{
59 if (!iovad->fq) 64 if (!has_iova_flush_queue(iovad))
60 return; 65 return;
61 66
62 if (timer_pending(&iovad->fq_timer)) 67 if (timer_pending(&iovad->fq_timer))
@@ -74,13 +79,14 @@ static void free_iova_flush_queue(struct iova_domain *iovad)
74int init_iova_flush_queue(struct iova_domain *iovad, 79int init_iova_flush_queue(struct iova_domain *iovad,
75 iova_flush_cb flush_cb, iova_entry_dtor entry_dtor) 80 iova_flush_cb flush_cb, iova_entry_dtor entry_dtor)
76{ 81{
82 struct iova_fq __percpu *queue;
77 int cpu; 83 int cpu;
78 84
79 atomic64_set(&iovad->fq_flush_start_cnt, 0); 85 atomic64_set(&iovad->fq_flush_start_cnt, 0);
80 atomic64_set(&iovad->fq_flush_finish_cnt, 0); 86 atomic64_set(&iovad->fq_flush_finish_cnt, 0);
81 87
82 iovad->fq = alloc_percpu(struct iova_fq); 88 queue = alloc_percpu(struct iova_fq);
83 if (!iovad->fq) 89 if (!queue)
84 return -ENOMEM; 90 return -ENOMEM;
85 91
86 iovad->flush_cb = flush_cb; 92 iovad->flush_cb = flush_cb;
@@ -89,13 +95,17 @@ int init_iova_flush_queue(struct iova_domain *iovad,
89 for_each_possible_cpu(cpu) { 95 for_each_possible_cpu(cpu) {
90 struct iova_fq *fq; 96 struct iova_fq *fq;
91 97
92 fq = per_cpu_ptr(iovad->fq, cpu); 98 fq = per_cpu_ptr(queue, cpu);
93 fq->head = 0; 99 fq->head = 0;
94 fq->tail = 0; 100 fq->tail = 0;
95 101
96 spin_lock_init(&fq->lock); 102 spin_lock_init(&fq->lock);
97 } 103 }
98 104
105 smp_wmb();
106
107 iovad->fq = queue;
108
99 timer_setup(&iovad->fq_timer, fq_flush_timeout, 0); 109 timer_setup(&iovad->fq_timer, fq_flush_timeout, 0);
100 atomic_set(&iovad->fq_timer_on, 0); 110 atomic_set(&iovad->fq_timer_on, 0);
101 111
@@ -127,8 +137,9 @@ __cached_rbnode_delete_update(struct iova_domain *iovad, struct iova *free)
127 struct iova *cached_iova; 137 struct iova *cached_iova;
128 138
129 cached_iova = rb_entry(iovad->cached32_node, struct iova, node); 139 cached_iova = rb_entry(iovad->cached32_node, struct iova, node);
130 if (free->pfn_hi < iovad->dma_32bit_pfn && 140 if (free == cached_iova ||
131 free->pfn_lo >= cached_iova->pfn_lo) { 141 (free->pfn_hi < iovad->dma_32bit_pfn &&
142 free->pfn_lo >= cached_iova->pfn_lo)) {
132 iovad->cached32_node = rb_next(&free->node); 143 iovad->cached32_node = rb_next(&free->node);
133 iovad->max32_alloc_size = iovad->dma_32bit_pfn; 144 iovad->max32_alloc_size = iovad->dma_32bit_pfn;
134 } 145 }
diff --git a/drivers/iommu/virtio-iommu.c b/drivers/iommu/virtio-iommu.c
index 433f4d2ee956..80a740df0737 100644
--- a/drivers/iommu/virtio-iommu.c
+++ b/drivers/iommu/virtio-iommu.c
@@ -2,7 +2,7 @@
2/* 2/*
3 * Virtio driver for the paravirtualized IOMMU 3 * Virtio driver for the paravirtualized IOMMU
4 * 4 *
5 * Copyright (C) 2018 Arm Limited 5 * Copyright (C) 2019 Arm Limited
6 */ 6 */
7 7
8#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 8#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
@@ -47,7 +47,10 @@ struct viommu_dev {
47 /* Device configuration */ 47 /* Device configuration */
48 struct iommu_domain_geometry geometry; 48 struct iommu_domain_geometry geometry;
49 u64 pgsize_bitmap; 49 u64 pgsize_bitmap;
50 u8 domain_bits; 50 u32 first_domain;
51 u32 last_domain;
52 /* Supported MAP flags */
53 u32 map_flags;
51 u32 probe_size; 54 u32 probe_size;
52}; 55};
53 56
@@ -62,6 +65,7 @@ struct viommu_domain {
62 struct viommu_dev *viommu; 65 struct viommu_dev *viommu;
63 struct mutex mutex; /* protects viommu pointer */ 66 struct mutex mutex; /* protects viommu pointer */
64 unsigned int id; 67 unsigned int id;
68 u32 map_flags;
65 69
66 spinlock_t mappings_lock; 70 spinlock_t mappings_lock;
67 struct rb_root_cached mappings; 71 struct rb_root_cached mappings;
@@ -113,6 +117,8 @@ static int viommu_get_req_errno(void *buf, size_t len)
113 return -ENOENT; 117 return -ENOENT;
114 case VIRTIO_IOMMU_S_FAULT: 118 case VIRTIO_IOMMU_S_FAULT:
115 return -EFAULT; 119 return -EFAULT;
120 case VIRTIO_IOMMU_S_NOMEM:
121 return -ENOMEM;
116 case VIRTIO_IOMMU_S_IOERR: 122 case VIRTIO_IOMMU_S_IOERR:
117 case VIRTIO_IOMMU_S_DEVERR: 123 case VIRTIO_IOMMU_S_DEVERR:
118 default: 124 default:
@@ -607,15 +613,15 @@ static int viommu_domain_finalise(struct viommu_dev *viommu,
607{ 613{
608 int ret; 614 int ret;
609 struct viommu_domain *vdomain = to_viommu_domain(domain); 615 struct viommu_domain *vdomain = to_viommu_domain(domain);
610 unsigned int max_domain = viommu->domain_bits > 31 ? ~0 :
611 (1U << viommu->domain_bits) - 1;
612 616
613 vdomain->viommu = viommu; 617 vdomain->viommu = viommu;
618 vdomain->map_flags = viommu->map_flags;
614 619
615 domain->pgsize_bitmap = viommu->pgsize_bitmap; 620 domain->pgsize_bitmap = viommu->pgsize_bitmap;
616 domain->geometry = viommu->geometry; 621 domain->geometry = viommu->geometry;
617 622
618 ret = ida_alloc_max(&viommu->domain_ids, max_domain, GFP_KERNEL); 623 ret = ida_alloc_range(&viommu->domain_ids, viommu->first_domain,
624 viommu->last_domain, GFP_KERNEL);
619 if (ret >= 0) 625 if (ret >= 0)
620 vdomain->id = (unsigned int)ret; 626 vdomain->id = (unsigned int)ret;
621 627
@@ -710,7 +716,7 @@ static int viommu_map(struct iommu_domain *domain, unsigned long iova,
710 phys_addr_t paddr, size_t size, int prot) 716 phys_addr_t paddr, size_t size, int prot)
711{ 717{
712 int ret; 718 int ret;
713 int flags; 719 u32 flags;
714 struct virtio_iommu_req_map map; 720 struct virtio_iommu_req_map map;
715 struct viommu_domain *vdomain = to_viommu_domain(domain); 721 struct viommu_domain *vdomain = to_viommu_domain(domain);
716 722
@@ -718,6 +724,9 @@ static int viommu_map(struct iommu_domain *domain, unsigned long iova,
718 (prot & IOMMU_WRITE ? VIRTIO_IOMMU_MAP_F_WRITE : 0) | 724 (prot & IOMMU_WRITE ? VIRTIO_IOMMU_MAP_F_WRITE : 0) |
719 (prot & IOMMU_MMIO ? VIRTIO_IOMMU_MAP_F_MMIO : 0); 725 (prot & IOMMU_MMIO ? VIRTIO_IOMMU_MAP_F_MMIO : 0);
720 726
727 if (flags & ~vdomain->map_flags)
728 return -EINVAL;
729
721 ret = viommu_add_mapping(vdomain, iova, paddr, size, flags); 730 ret = viommu_add_mapping(vdomain, iova, paddr, size, flags);
722 if (ret) 731 if (ret)
723 return ret; 732 return ret;
@@ -1027,7 +1036,8 @@ static int viommu_probe(struct virtio_device *vdev)
1027 goto err_free_vqs; 1036 goto err_free_vqs;
1028 } 1037 }
1029 1038
1030 viommu->domain_bits = 32; 1039 viommu->map_flags = VIRTIO_IOMMU_MAP_F_READ | VIRTIO_IOMMU_MAP_F_WRITE;
1040 viommu->last_domain = ~0U;
1031 1041
1032 /* Optional features */ 1042 /* Optional features */
1033 virtio_cread_feature(vdev, VIRTIO_IOMMU_F_INPUT_RANGE, 1043 virtio_cread_feature(vdev, VIRTIO_IOMMU_F_INPUT_RANGE,
@@ -1038,9 +1048,13 @@ static int viommu_probe(struct virtio_device *vdev)
1038 struct virtio_iommu_config, input_range.end, 1048 struct virtio_iommu_config, input_range.end,
1039 &input_end); 1049 &input_end);
1040 1050
1041 virtio_cread_feature(vdev, VIRTIO_IOMMU_F_DOMAIN_BITS, 1051 virtio_cread_feature(vdev, VIRTIO_IOMMU_F_DOMAIN_RANGE,
1042 struct virtio_iommu_config, domain_bits, 1052 struct virtio_iommu_config, domain_range.start,
1043 &viommu->domain_bits); 1053 &viommu->first_domain);
1054
1055 virtio_cread_feature(vdev, VIRTIO_IOMMU_F_DOMAIN_RANGE,
1056 struct virtio_iommu_config, domain_range.end,
1057 &viommu->last_domain);
1044 1058
1045 virtio_cread_feature(vdev, VIRTIO_IOMMU_F_PROBE, 1059 virtio_cread_feature(vdev, VIRTIO_IOMMU_F_PROBE,
1046 struct virtio_iommu_config, probe_size, 1060 struct virtio_iommu_config, probe_size,
@@ -1052,6 +1066,9 @@ static int viommu_probe(struct virtio_device *vdev)
1052 .force_aperture = true, 1066 .force_aperture = true,
1053 }; 1067 };
1054 1068
1069 if (virtio_has_feature(vdev, VIRTIO_IOMMU_F_MMIO))
1070 viommu->map_flags |= VIRTIO_IOMMU_MAP_F_MMIO;
1071
1055 viommu_ops.pgsize_bitmap = viommu->pgsize_bitmap; 1072 viommu_ops.pgsize_bitmap = viommu->pgsize_bitmap;
1056 1073
1057 virtio_device_ready(vdev); 1074 virtio_device_ready(vdev);
@@ -1130,9 +1147,10 @@ static void viommu_config_changed(struct virtio_device *vdev)
1130 1147
1131static unsigned int features[] = { 1148static unsigned int features[] = {
1132 VIRTIO_IOMMU_F_MAP_UNMAP, 1149 VIRTIO_IOMMU_F_MAP_UNMAP,
1133 VIRTIO_IOMMU_F_DOMAIN_BITS,
1134 VIRTIO_IOMMU_F_INPUT_RANGE, 1150 VIRTIO_IOMMU_F_INPUT_RANGE,
1151 VIRTIO_IOMMU_F_DOMAIN_RANGE,
1135 VIRTIO_IOMMU_F_PROBE, 1152 VIRTIO_IOMMU_F_PROBE,
1153 VIRTIO_IOMMU_F_MMIO,
1136}; 1154};
1137 1155
1138static struct virtio_device_id id_table[] = { 1156static struct virtio_device_id id_table[] = {
diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c
index 730fbe0e2a9d..1b5c3672aea2 100644
--- a/drivers/irqchip/irq-gic-v3-its.c
+++ b/drivers/irqchip/irq-gic-v3-its.c
@@ -3010,7 +3010,7 @@ static int its_vpe_init(struct its_vpe *vpe)
3010 3010
3011 if (!its_alloc_vpe_table(vpe_id)) { 3011 if (!its_alloc_vpe_table(vpe_id)) {
3012 its_vpe_id_free(vpe_id); 3012 its_vpe_id_free(vpe_id);
3013 its_free_pending_table(vpe->vpt_page); 3013 its_free_pending_table(vpt_page);
3014 return -ENOMEM; 3014 return -ENOMEM;
3015 } 3015 }
3016 3016
diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c
index 9bca4896fa6f..96d927f0f91a 100644
--- a/drivers/irqchip/irq-gic-v3.c
+++ b/drivers/irqchip/irq-gic-v3.c
@@ -771,8 +771,10 @@ static void gic_cpu_sys_reg_init(void)
771 case 7: 771 case 7:
772 write_gicreg(0, ICC_AP0R3_EL1); 772 write_gicreg(0, ICC_AP0R3_EL1);
773 write_gicreg(0, ICC_AP0R2_EL1); 773 write_gicreg(0, ICC_AP0R2_EL1);
774 /* Fall through */
774 case 6: 775 case 6:
775 write_gicreg(0, ICC_AP0R1_EL1); 776 write_gicreg(0, ICC_AP0R1_EL1);
777 /* Fall through */
776 case 5: 778 case 5:
777 case 4: 779 case 4:
778 write_gicreg(0, ICC_AP0R0_EL1); 780 write_gicreg(0, ICC_AP0R0_EL1);
@@ -786,8 +788,10 @@ static void gic_cpu_sys_reg_init(void)
786 case 7: 788 case 7:
787 write_gicreg(0, ICC_AP1R3_EL1); 789 write_gicreg(0, ICC_AP1R3_EL1);
788 write_gicreg(0, ICC_AP1R2_EL1); 790 write_gicreg(0, ICC_AP1R2_EL1);
791 /* Fall through */
789 case 6: 792 case 6:
790 write_gicreg(0, ICC_AP1R1_EL1); 793 write_gicreg(0, ICC_AP1R1_EL1);
794 /* Fall through */
791 case 5: 795 case 5:
792 case 4: 796 case 4:
793 write_gicreg(0, ICC_AP1R0_EL1); 797 write_gicreg(0, ICC_AP1R0_EL1);
diff --git a/drivers/irqchip/irq-imx-gpcv2.c b/drivers/irqchip/irq-imx-gpcv2.c
index bf2237ac5d09..4f74c15c4755 100644
--- a/drivers/irqchip/irq-imx-gpcv2.c
+++ b/drivers/irqchip/irq-imx-gpcv2.c
@@ -131,6 +131,7 @@ static struct irq_chip gpcv2_irqchip_data_chip = {
131 .irq_unmask = imx_gpcv2_irq_unmask, 131 .irq_unmask = imx_gpcv2_irq_unmask,
132 .irq_set_wake = imx_gpcv2_irq_set_wake, 132 .irq_set_wake = imx_gpcv2_irq_set_wake,
133 .irq_retrigger = irq_chip_retrigger_hierarchy, 133 .irq_retrigger = irq_chip_retrigger_hierarchy,
134 .irq_set_type = irq_chip_set_type_parent,
134#ifdef CONFIG_SMP 135#ifdef CONFIG_SMP
135 .irq_set_affinity = irq_chip_set_affinity_parent, 136 .irq_set_affinity = irq_chip_set_affinity_parent,
136#endif 137#endif
diff --git a/drivers/irqchip/irq-mbigen.c b/drivers/irqchip/irq-mbigen.c
index 3dd28382d5f5..3f09f658e8e2 100644
--- a/drivers/irqchip/irq-mbigen.c
+++ b/drivers/irqchip/irq-mbigen.c
@@ -241,12 +241,15 @@ static int mbigen_of_create_domain(struct platform_device *pdev,
241 241
242 parent = platform_bus_type.dev_root; 242 parent = platform_bus_type.dev_root;
243 child = of_platform_device_create(np, NULL, parent); 243 child = of_platform_device_create(np, NULL, parent);
244 if (!child) 244 if (!child) {
245 of_node_put(np);
245 return -ENOMEM; 246 return -ENOMEM;
247 }
246 248
247 if (of_property_read_u32(child->dev.of_node, "num-pins", 249 if (of_property_read_u32(child->dev.of_node, "num-pins",
248 &num_pins) < 0) { 250 &num_pins) < 0) {
249 dev_err(&pdev->dev, "No num-pins property\n"); 251 dev_err(&pdev->dev, "No num-pins property\n");
252 of_node_put(np);
250 return -EINVAL; 253 return -EINVAL;
251 } 254 }
252 255
@@ -254,8 +257,10 @@ static int mbigen_of_create_domain(struct platform_device *pdev,
254 mbigen_write_msg, 257 mbigen_write_msg,
255 &mbigen_domain_ops, 258 &mbigen_domain_ops,
256 mgn_chip); 259 mgn_chip);
257 if (!domain) 260 if (!domain) {
261 of_node_put(np);
258 return -ENOMEM; 262 return -ENOMEM;
263 }
259 } 264 }
260 265
261 return 0; 266 return 0;
diff --git a/drivers/isdn/hardware/mISDN/hfcsusb.c b/drivers/isdn/hardware/mISDN/hfcsusb.c
index 0e224232f746..008a74a1ed44 100644
--- a/drivers/isdn/hardware/mISDN/hfcsusb.c
+++ b/drivers/isdn/hardware/mISDN/hfcsusb.c
@@ -1394,6 +1394,7 @@ start_isoc_chain(struct usb_fifo *fifo, int num_packets_per_urb,
1394 printk(KERN_DEBUG 1394 printk(KERN_DEBUG
1395 "%s: %s: alloc urb for fifo %i failed", 1395 "%s: %s: alloc urb for fifo %i failed",
1396 hw->name, __func__, fifo->fifonum); 1396 hw->name, __func__, fifo->fifonum);
1397 continue;
1397 } 1398 }
1398 fifo->iso[i].owner_fifo = (struct usb_fifo *) fifo; 1399 fifo->iso[i].owner_fifo = (struct usb_fifo *) fifo;
1399 fifo->iso[i].indx = i; 1400 fifo->iso[i].indx = i;
@@ -1692,13 +1693,23 @@ hfcsusb_stop_endpoint(struct hfcsusb *hw, int channel)
1692static int 1693static int
1693setup_hfcsusb(struct hfcsusb *hw) 1694setup_hfcsusb(struct hfcsusb *hw)
1694{ 1695{
1696 void *dmabuf = kmalloc(sizeof(u_char), GFP_KERNEL);
1695 u_char b; 1697 u_char b;
1698 int ret;
1696 1699
1697 if (debug & DBG_HFC_CALL_TRACE) 1700 if (debug & DBG_HFC_CALL_TRACE)
1698 printk(KERN_DEBUG "%s: %s\n", hw->name, __func__); 1701 printk(KERN_DEBUG "%s: %s\n", hw->name, __func__);
1699 1702
1703 if (!dmabuf)
1704 return -ENOMEM;
1705
1706 ret = read_reg_atomic(hw, HFCUSB_CHIP_ID, dmabuf);
1707
1708 memcpy(&b, dmabuf, sizeof(u_char));
1709 kfree(dmabuf);
1710
1700 /* check the chip id */ 1711 /* check the chip id */
1701 if (read_reg_atomic(hw, HFCUSB_CHIP_ID, &b) != 1) { 1712 if (ret != 1) {
1702 printk(KERN_DEBUG "%s: %s: cannot read chip id\n", 1713 printk(KERN_DEBUG "%s: %s: cannot read chip id\n",
1703 hw->name, __func__); 1714 hw->name, __func__);
1704 return 1; 1715 return 1;
diff --git a/drivers/macintosh/smu.c b/drivers/macintosh/smu.c
index 276065c888bc..23f1f41c8602 100644
--- a/drivers/macintosh/smu.c
+++ b/drivers/macintosh/smu.c
@@ -852,6 +852,7 @@ int smu_queue_i2c(struct smu_i2c_cmd *cmd)
852 break; 852 break;
853 case SMU_I2C_TRANSFER_COMBINED: 853 case SMU_I2C_TRANSFER_COMBINED:
854 cmd->info.devaddr &= 0xfe; 854 cmd->info.devaddr &= 0xfe;
855 /* fall through */
855 case SMU_I2C_TRANSFER_STDSUB: 856 case SMU_I2C_TRANSFER_STDSUB:
856 if (cmd->info.sublen > 3) 857 if (cmd->info.sublen > 3)
857 return -EINVAL; 858 return -EINVAL;
diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
index 26e374fbf57c..20ed838e9413 100644
--- a/drivers/md/bcache/super.c
+++ b/drivers/md/bcache/super.c
@@ -931,6 +931,9 @@ int bch_cached_dev_run(struct cached_dev *dc)
931 if (dc->io_disable) { 931 if (dc->io_disable) {
932 pr_err("I/O disabled on cached dev %s", 932 pr_err("I/O disabled on cached dev %s",
933 dc->backing_dev_name); 933 dc->backing_dev_name);
934 kfree(env[1]);
935 kfree(env[2]);
936 kfree(buf);
934 return -EIO; 937 return -EIO;
935 } 938 }
936 939
diff --git a/drivers/md/bcache/sysfs.c b/drivers/md/bcache/sysfs.c
index 9f0826712845..e2059af90791 100644
--- a/drivers/md/bcache/sysfs.c
+++ b/drivers/md/bcache/sysfs.c
@@ -23,24 +23,28 @@ static const char * const bch_cache_modes[] = {
23 "writethrough", 23 "writethrough",
24 "writeback", 24 "writeback",
25 "writearound", 25 "writearound",
26 "none" 26 "none",
27 NULL
27}; 28};
28 29
29/* Default is 0 ("auto") */ 30/* Default is 0 ("auto") */
30static const char * const bch_stop_on_failure_modes[] = { 31static const char * const bch_stop_on_failure_modes[] = {
31 "auto", 32 "auto",
32 "always" 33 "always",
34 NULL
33}; 35};
34 36
35static const char * const cache_replacement_policies[] = { 37static const char * const cache_replacement_policies[] = {
36 "lru", 38 "lru",
37 "fifo", 39 "fifo",
38 "random" 40 "random",
41 NULL
39}; 42};
40 43
41static const char * const error_actions[] = { 44static const char * const error_actions[] = {
42 "unregister", 45 "unregister",
43 "panic" 46 "panic",
47 NULL
44}; 48};
45 49
46write_attribute(attach); 50write_attribute(attach);
@@ -338,7 +342,7 @@ STORE(__cached_dev)
338 } 342 }
339 343
340 if (attr == &sysfs_cache_mode) { 344 if (attr == &sysfs_cache_mode) {
341 v = sysfs_match_string(bch_cache_modes, buf); 345 v = __sysfs_match_string(bch_cache_modes, -1, buf);
342 if (v < 0) 346 if (v < 0)
343 return v; 347 return v;
344 348
@@ -349,7 +353,7 @@ STORE(__cached_dev)
349 } 353 }
350 354
351 if (attr == &sysfs_stop_when_cache_set_failed) { 355 if (attr == &sysfs_stop_when_cache_set_failed) {
352 v = sysfs_match_string(bch_stop_on_failure_modes, buf); 356 v = __sysfs_match_string(bch_stop_on_failure_modes, -1, buf);
353 if (v < 0) 357 if (v < 0)
354 return v; 358 return v;
355 359
@@ -816,7 +820,7 @@ STORE(__bch_cache_set)
816 0, UINT_MAX); 820 0, UINT_MAX);
817 821
818 if (attr == &sysfs_errors) { 822 if (attr == &sysfs_errors) {
819 v = sysfs_match_string(error_actions, buf); 823 v = __sysfs_match_string(error_actions, -1, buf);
820 if (v < 0) 824 if (v < 0)
821 return v; 825 return v;
822 826
@@ -1088,7 +1092,7 @@ STORE(__bch_cache)
1088 } 1092 }
1089 1093
1090 if (attr == &sysfs_cache_replacement_policy) { 1094 if (attr == &sysfs_cache_replacement_policy) {
1091 v = sysfs_match_string(cache_replacement_policies, buf); 1095 v = __sysfs_match_string(cache_replacement_policies, -1, buf);
1092 if (v < 0) 1096 if (v < 0)
1093 return v; 1097 return v;
1094 1098
diff --git a/drivers/md/dm-bufio.c b/drivers/md/dm-bufio.c
index b6b5acc92ca2..2a48ea3f1b30 100644
--- a/drivers/md/dm-bufio.c
+++ b/drivers/md/dm-bufio.c
@@ -1599,7 +1599,9 @@ dm_bufio_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
1599 unsigned long freed; 1599 unsigned long freed;
1600 1600
1601 c = container_of(shrink, struct dm_bufio_client, shrinker); 1601 c = container_of(shrink, struct dm_bufio_client, shrinker);
1602 if (!dm_bufio_trylock(c)) 1602 if (sc->gfp_mask & __GFP_FS)
1603 dm_bufio_lock(c);
1604 else if (!dm_bufio_trylock(c))
1603 return SHRINK_STOP; 1605 return SHRINK_STOP;
1604 1606
1605 freed = __scan(c, sc->nr_to_scan, sc->gfp_mask); 1607 freed = __scan(c, sc->nr_to_scan, sc->gfp_mask);
diff --git a/drivers/md/dm-dust.c b/drivers/md/dm-dust.c
index 845f376a72d9..8288887b7f94 100644
--- a/drivers/md/dm-dust.c
+++ b/drivers/md/dm-dust.c
@@ -25,6 +25,7 @@ struct dust_device {
25 unsigned long long badblock_count; 25 unsigned long long badblock_count;
26 spinlock_t dust_lock; 26 spinlock_t dust_lock;
27 unsigned int blksz; 27 unsigned int blksz;
28 int sect_per_block_shift;
28 unsigned int sect_per_block; 29 unsigned int sect_per_block;
29 sector_t start; 30 sector_t start;
30 bool fail_read_on_bb:1; 31 bool fail_read_on_bb:1;
@@ -79,7 +80,7 @@ static int dust_remove_block(struct dust_device *dd, unsigned long long block)
79 unsigned long flags; 80 unsigned long flags;
80 81
81 spin_lock_irqsave(&dd->dust_lock, flags); 82 spin_lock_irqsave(&dd->dust_lock, flags);
82 bblock = dust_rb_search(&dd->badblocklist, block * dd->sect_per_block); 83 bblock = dust_rb_search(&dd->badblocklist, block);
83 84
84 if (bblock == NULL) { 85 if (bblock == NULL) {
85 if (!dd->quiet_mode) { 86 if (!dd->quiet_mode) {
@@ -113,7 +114,7 @@ static int dust_add_block(struct dust_device *dd, unsigned long long block)
113 } 114 }
114 115
115 spin_lock_irqsave(&dd->dust_lock, flags); 116 spin_lock_irqsave(&dd->dust_lock, flags);
116 bblock->bb = block * dd->sect_per_block; 117 bblock->bb = block;
117 if (!dust_rb_insert(&dd->badblocklist, bblock)) { 118 if (!dust_rb_insert(&dd->badblocklist, bblock)) {
118 if (!dd->quiet_mode) { 119 if (!dd->quiet_mode) {
119 DMERR("%s: block %llu already in badblocklist", 120 DMERR("%s: block %llu already in badblocklist",
@@ -138,7 +139,7 @@ static int dust_query_block(struct dust_device *dd, unsigned long long block)
138 unsigned long flags; 139 unsigned long flags;
139 140
140 spin_lock_irqsave(&dd->dust_lock, flags); 141 spin_lock_irqsave(&dd->dust_lock, flags);
141 bblock = dust_rb_search(&dd->badblocklist, block * dd->sect_per_block); 142 bblock = dust_rb_search(&dd->badblocklist, block);
142 if (bblock != NULL) 143 if (bblock != NULL)
143 DMINFO("%s: block %llu found in badblocklist", __func__, block); 144 DMINFO("%s: block %llu found in badblocklist", __func__, block);
144 else 145 else
@@ -165,6 +166,7 @@ static int dust_map_read(struct dust_device *dd, sector_t thisblock,
165 int ret = DM_MAPIO_REMAPPED; 166 int ret = DM_MAPIO_REMAPPED;
166 167
167 if (fail_read_on_bb) { 168 if (fail_read_on_bb) {
169 thisblock >>= dd->sect_per_block_shift;
168 spin_lock_irqsave(&dd->dust_lock, flags); 170 spin_lock_irqsave(&dd->dust_lock, flags);
169 ret = __dust_map_read(dd, thisblock); 171 ret = __dust_map_read(dd, thisblock);
170 spin_unlock_irqrestore(&dd->dust_lock, flags); 172 spin_unlock_irqrestore(&dd->dust_lock, flags);
@@ -195,6 +197,7 @@ static int dust_map_write(struct dust_device *dd, sector_t thisblock,
195 unsigned long flags; 197 unsigned long flags;
196 198
197 if (fail_read_on_bb) { 199 if (fail_read_on_bb) {
200 thisblock >>= dd->sect_per_block_shift;
198 spin_lock_irqsave(&dd->dust_lock, flags); 201 spin_lock_irqsave(&dd->dust_lock, flags);
199 __dust_map_write(dd, thisblock); 202 __dust_map_write(dd, thisblock);
200 spin_unlock_irqrestore(&dd->dust_lock, flags); 203 spin_unlock_irqrestore(&dd->dust_lock, flags);
@@ -331,6 +334,8 @@ static int dust_ctr(struct dm_target *ti, unsigned int argc, char **argv)
331 dd->blksz = blksz; 334 dd->blksz = blksz;
332 dd->start = tmp; 335 dd->start = tmp;
333 336
337 dd->sect_per_block_shift = __ffs(sect_per_block);
338
334 /* 339 /*
335 * Whether to fail a read on a "bad" block. 340 * Whether to fail a read on a "bad" block.
336 * Defaults to false; enabled later by message. 341 * Defaults to false; enabled later by message.
diff --git a/drivers/md/dm-integrity.c b/drivers/md/dm-integrity.c
index b1b0de402dfc..9118ab85cb3a 100644
--- a/drivers/md/dm-integrity.c
+++ b/drivers/md/dm-integrity.c
@@ -1943,7 +1943,22 @@ offload_to_thread:
1943 queue_work(ic->wait_wq, &dio->work); 1943 queue_work(ic->wait_wq, &dio->work);
1944 return; 1944 return;
1945 } 1945 }
1946 if (journal_read_pos != NOT_FOUND)
1947 dio->range.n_sectors = ic->sectors_per_block;
1946 wait_and_add_new_range(ic, &dio->range); 1948 wait_and_add_new_range(ic, &dio->range);
1949 /*
1950 * wait_and_add_new_range drops the spinlock, so the journal
1951 * may have been changed arbitrarily. We need to recheck.
1952 * To simplify the code, we restrict I/O size to just one block.
1953 */
1954 if (journal_read_pos != NOT_FOUND) {
1955 sector_t next_sector;
1956 unsigned new_pos = find_journal_node(ic, dio->range.logical_sector, &next_sector);
1957 if (unlikely(new_pos != journal_read_pos)) {
1958 remove_range_unlocked(ic, &dio->range);
1959 goto retry;
1960 }
1961 }
1947 } 1962 }
1948 spin_unlock_irq(&ic->endio_wait.lock); 1963 spin_unlock_irq(&ic->endio_wait.lock);
1949 1964
diff --git a/drivers/md/dm-kcopyd.c b/drivers/md/dm-kcopyd.c
index df2011de7be2..1bbe4a34ef4c 100644
--- a/drivers/md/dm-kcopyd.c
+++ b/drivers/md/dm-kcopyd.c
@@ -566,8 +566,10 @@ static int run_io_job(struct kcopyd_job *job)
566 * no point in continuing. 566 * no point in continuing.
567 */ 567 */
568 if (test_bit(DM_KCOPYD_WRITE_SEQ, &job->flags) && 568 if (test_bit(DM_KCOPYD_WRITE_SEQ, &job->flags) &&
569 job->master_job->write_err) 569 job->master_job->write_err) {
570 job->write_err = job->master_job->write_err;
570 return -EIO; 571 return -EIO;
572 }
571 573
572 io_job_start(job->kc->throttle); 574 io_job_start(job->kc->throttle);
573 575
@@ -619,6 +621,7 @@ static int process_jobs(struct list_head *jobs, struct dm_kcopyd_client *kc,
619 else 621 else
620 job->read_err = 1; 622 job->read_err = 1;
621 push(&kc->complete_jobs, job); 623 push(&kc->complete_jobs, job);
624 wake(kc);
622 break; 625 break;
623 } 626 }
624 627
diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c
index 8a60a4a070ac..1f933dd197cd 100644
--- a/drivers/md/dm-raid.c
+++ b/drivers/md/dm-raid.c
@@ -3194,7 +3194,7 @@ static int raid_ctr(struct dm_target *ti, unsigned int argc, char **argv)
3194 */ 3194 */
3195 r = rs_prepare_reshape(rs); 3195 r = rs_prepare_reshape(rs);
3196 if (r) 3196 if (r)
3197 return r; 3197 goto bad;
3198 3198
3199 /* Reshaping ain't recovery, so disable recovery */ 3199 /* Reshaping ain't recovery, so disable recovery */
3200 rs_setup_recovery(rs, MaxSector); 3200 rs_setup_recovery(rs, MaxSector);
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index caaee8032afe..8820931ec7d2 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -882,23 +882,23 @@ EXPORT_SYMBOL_GPL(dm_table_set_type);
882 882
883/* validate the dax capability of the target device span */ 883/* validate the dax capability of the target device span */
884int device_supports_dax(struct dm_target *ti, struct dm_dev *dev, 884int device_supports_dax(struct dm_target *ti, struct dm_dev *dev,
885 sector_t start, sector_t len, void *data) 885 sector_t start, sector_t len, void *data)
886{ 886{
887 int blocksize = *(int *) data; 887 int blocksize = *(int *) data;
888 888
889 return generic_fsdax_supported(dev->dax_dev, dev->bdev, blocksize, 889 return generic_fsdax_supported(dev->dax_dev, dev->bdev, blocksize,
890 start, len); 890 start, len);
891} 891}
892 892
893/* Check devices support synchronous DAX */ 893/* Check devices support synchronous DAX */
894static int device_synchronous(struct dm_target *ti, struct dm_dev *dev, 894static int device_dax_synchronous(struct dm_target *ti, struct dm_dev *dev,
895 sector_t start, sector_t len, void *data) 895 sector_t start, sector_t len, void *data)
896{ 896{
897 return dax_synchronous(dev->dax_dev); 897 return dev->dax_dev && dax_synchronous(dev->dax_dev);
898} 898}
899 899
900bool dm_table_supports_dax(struct dm_table *t, 900bool dm_table_supports_dax(struct dm_table *t,
901 iterate_devices_callout_fn iterate_fn, int *blocksize) 901 iterate_devices_callout_fn iterate_fn, int *blocksize)
902{ 902{
903 struct dm_target *ti; 903 struct dm_target *ti;
904 unsigned i; 904 unsigned i;
@@ -911,7 +911,7 @@ bool dm_table_supports_dax(struct dm_table *t,
911 return false; 911 return false;
912 912
913 if (!ti->type->iterate_devices || 913 if (!ti->type->iterate_devices ||
914 !ti->type->iterate_devices(ti, iterate_fn, blocksize)) 914 !ti->type->iterate_devices(ti, iterate_fn, blocksize))
915 return false; 915 return false;
916 } 916 }
917 917
@@ -1342,7 +1342,7 @@ void dm_table_event(struct dm_table *t)
1342} 1342}
1343EXPORT_SYMBOL(dm_table_event); 1343EXPORT_SYMBOL(dm_table_event);
1344 1344
1345sector_t dm_table_get_size(struct dm_table *t) 1345inline sector_t dm_table_get_size(struct dm_table *t)
1346{ 1346{
1347 return t->num_targets ? (t->highs[t->num_targets - 1] + 1) : 0; 1347 return t->num_targets ? (t->highs[t->num_targets - 1] + 1) : 0;
1348} 1348}
@@ -1367,6 +1367,9 @@ struct dm_target *dm_table_find_target(struct dm_table *t, sector_t sector)
1367 unsigned int l, n = 0, k = 0; 1367 unsigned int l, n = 0, k = 0;
1368 sector_t *node; 1368 sector_t *node;
1369 1369
1370 if (unlikely(sector >= dm_table_get_size(t)))
1371 return &t->targets[t->num_targets];
1372
1370 for (l = 0; l < t->depth; l++) { 1373 for (l = 0; l < t->depth; l++) {
1371 n = get_child(n, k); 1374 n = get_child(n, k);
1372 node = get_node(t, l, n); 1375 node = get_node(t, l, n);
@@ -1921,7 +1924,7 @@ void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
1921 1924
1922 if (dm_table_supports_dax(t, device_supports_dax, &page_size)) { 1925 if (dm_table_supports_dax(t, device_supports_dax, &page_size)) {
1923 blk_queue_flag_set(QUEUE_FLAG_DAX, q); 1926 blk_queue_flag_set(QUEUE_FLAG_DAX, q);
1924 if (dm_table_supports_dax(t, device_synchronous, NULL)) 1927 if (dm_table_supports_dax(t, device_dax_synchronous, NULL))
1925 set_dax_synchronous(t->md->dax_dev); 1928 set_dax_synchronous(t->md->dax_dev);
1926 } 1929 }
1927 else 1930 else
diff --git a/drivers/md/dm-zoned-metadata.c b/drivers/md/dm-zoned-metadata.c
index 8545dcee9fd0..595a73110e17 100644
--- a/drivers/md/dm-zoned-metadata.c
+++ b/drivers/md/dm-zoned-metadata.c
@@ -1,3 +1,4 @@
1// SPDX-License-Identifier: GPL-2.0-only
1/* 2/*
2 * Copyright (C) 2017 Western Digital Corporation or its affiliates. 3 * Copyright (C) 2017 Western Digital Corporation or its affiliates.
3 * 4 *
@@ -34,7 +35,7 @@
34 * (1) Super block (1 block) 35 * (1) Super block (1 block)
35 * (2) Chunk mapping table (nr_map_blocks) 36 * (2) Chunk mapping table (nr_map_blocks)
36 * (3) Bitmap blocks (nr_bitmap_blocks) 37 * (3) Bitmap blocks (nr_bitmap_blocks)
37 * All metadata blocks are stored in conventional zones, starting from the 38 * All metadata blocks are stored in conventional zones, starting from
38 * the first conventional zone found on disk. 39 * the first conventional zone found on disk.
39 */ 40 */
40struct dmz_super { 41struct dmz_super {
@@ -233,7 +234,7 @@ void dmz_unlock_map(struct dmz_metadata *zmd)
233 * Lock/unlock metadata access. This is a "read" lock on a semaphore 234 * Lock/unlock metadata access. This is a "read" lock on a semaphore
234 * that prevents metadata flush from running while metadata are being 235 * that prevents metadata flush from running while metadata are being
235 * modified. The actual metadata write mutual exclusion is achieved with 236 * modified. The actual metadata write mutual exclusion is achieved with
236 * the map lock and zone styate management (active and reclaim state are 237 * the map lock and zone state management (active and reclaim state are
237 * mutually exclusive). 238 * mutually exclusive).
238 */ 239 */
239void dmz_lock_metadata(struct dmz_metadata *zmd) 240void dmz_lock_metadata(struct dmz_metadata *zmd)
@@ -402,15 +403,18 @@ static struct dmz_mblock *dmz_get_mblock_slow(struct dmz_metadata *zmd,
402 sector_t block = zmd->sb[zmd->mblk_primary].block + mblk_no; 403 sector_t block = zmd->sb[zmd->mblk_primary].block + mblk_no;
403 struct bio *bio; 404 struct bio *bio;
404 405
406 if (dmz_bdev_is_dying(zmd->dev))
407 return ERR_PTR(-EIO);
408
405 /* Get a new block and a BIO to read it */ 409 /* Get a new block and a BIO to read it */
406 mblk = dmz_alloc_mblock(zmd, mblk_no); 410 mblk = dmz_alloc_mblock(zmd, mblk_no);
407 if (!mblk) 411 if (!mblk)
408 return NULL; 412 return ERR_PTR(-ENOMEM);
409 413
410 bio = bio_alloc(GFP_NOIO, 1); 414 bio = bio_alloc(GFP_NOIO, 1);
411 if (!bio) { 415 if (!bio) {
412 dmz_free_mblock(zmd, mblk); 416 dmz_free_mblock(zmd, mblk);
413 return NULL; 417 return ERR_PTR(-ENOMEM);
414 } 418 }
415 419
416 spin_lock(&zmd->mblk_lock); 420 spin_lock(&zmd->mblk_lock);
@@ -541,8 +545,8 @@ static struct dmz_mblock *dmz_get_mblock(struct dmz_metadata *zmd,
541 if (!mblk) { 545 if (!mblk) {
542 /* Cache miss: read the block from disk */ 546 /* Cache miss: read the block from disk */
543 mblk = dmz_get_mblock_slow(zmd, mblk_no); 547 mblk = dmz_get_mblock_slow(zmd, mblk_no);
544 if (!mblk) 548 if (IS_ERR(mblk))
545 return ERR_PTR(-ENOMEM); 549 return mblk;
546 } 550 }
547 551
548 /* Wait for on-going read I/O and check for error */ 552 /* Wait for on-going read I/O and check for error */
@@ -570,16 +574,19 @@ static void dmz_dirty_mblock(struct dmz_metadata *zmd, struct dmz_mblock *mblk)
570/* 574/*
571 * Issue a metadata block write BIO. 575 * Issue a metadata block write BIO.
572 */ 576 */
573static void dmz_write_mblock(struct dmz_metadata *zmd, struct dmz_mblock *mblk, 577static int dmz_write_mblock(struct dmz_metadata *zmd, struct dmz_mblock *mblk,
574 unsigned int set) 578 unsigned int set)
575{ 579{
576 sector_t block = zmd->sb[set].block + mblk->no; 580 sector_t block = zmd->sb[set].block + mblk->no;
577 struct bio *bio; 581 struct bio *bio;
578 582
583 if (dmz_bdev_is_dying(zmd->dev))
584 return -EIO;
585
579 bio = bio_alloc(GFP_NOIO, 1); 586 bio = bio_alloc(GFP_NOIO, 1);
580 if (!bio) { 587 if (!bio) {
581 set_bit(DMZ_META_ERROR, &mblk->state); 588 set_bit(DMZ_META_ERROR, &mblk->state);
582 return; 589 return -ENOMEM;
583 } 590 }
584 591
585 set_bit(DMZ_META_WRITING, &mblk->state); 592 set_bit(DMZ_META_WRITING, &mblk->state);
@@ -591,6 +598,8 @@ static void dmz_write_mblock(struct dmz_metadata *zmd, struct dmz_mblock *mblk,
591 bio_set_op_attrs(bio, REQ_OP_WRITE, REQ_META | REQ_PRIO); 598 bio_set_op_attrs(bio, REQ_OP_WRITE, REQ_META | REQ_PRIO);
592 bio_add_page(bio, mblk->page, DMZ_BLOCK_SIZE, 0); 599 bio_add_page(bio, mblk->page, DMZ_BLOCK_SIZE, 0);
593 submit_bio(bio); 600 submit_bio(bio);
601
602 return 0;
594} 603}
595 604
596/* 605/*
@@ -602,6 +611,9 @@ static int dmz_rdwr_block(struct dmz_metadata *zmd, int op, sector_t block,
602 struct bio *bio; 611 struct bio *bio;
603 int ret; 612 int ret;
604 613
614 if (dmz_bdev_is_dying(zmd->dev))
615 return -EIO;
616
605 bio = bio_alloc(GFP_NOIO, 1); 617 bio = bio_alloc(GFP_NOIO, 1);
606 if (!bio) 618 if (!bio)
607 return -ENOMEM; 619 return -ENOMEM;
@@ -659,22 +671,29 @@ static int dmz_write_dirty_mblocks(struct dmz_metadata *zmd,
659{ 671{
660 struct dmz_mblock *mblk; 672 struct dmz_mblock *mblk;
661 struct blk_plug plug; 673 struct blk_plug plug;
662 int ret = 0; 674 int ret = 0, nr_mblks_submitted = 0;
663 675
664 /* Issue writes */ 676 /* Issue writes */
665 blk_start_plug(&plug); 677 blk_start_plug(&plug);
666 list_for_each_entry(mblk, write_list, link) 678 list_for_each_entry(mblk, write_list, link) {
667 dmz_write_mblock(zmd, mblk, set); 679 ret = dmz_write_mblock(zmd, mblk, set);
680 if (ret)
681 break;
682 nr_mblks_submitted++;
683 }
668 blk_finish_plug(&plug); 684 blk_finish_plug(&plug);
669 685
670 /* Wait for completion */ 686 /* Wait for completion */
671 list_for_each_entry(mblk, write_list, link) { 687 list_for_each_entry(mblk, write_list, link) {
688 if (!nr_mblks_submitted)
689 break;
672 wait_on_bit_io(&mblk->state, DMZ_META_WRITING, 690 wait_on_bit_io(&mblk->state, DMZ_META_WRITING,
673 TASK_UNINTERRUPTIBLE); 691 TASK_UNINTERRUPTIBLE);
674 if (test_bit(DMZ_META_ERROR, &mblk->state)) { 692 if (test_bit(DMZ_META_ERROR, &mblk->state)) {
675 clear_bit(DMZ_META_ERROR, &mblk->state); 693 clear_bit(DMZ_META_ERROR, &mblk->state);
676 ret = -EIO; 694 ret = -EIO;
677 } 695 }
696 nr_mblks_submitted--;
678 } 697 }
679 698
680 /* Flush drive cache (this will also sync data) */ 699 /* Flush drive cache (this will also sync data) */
@@ -736,6 +755,11 @@ int dmz_flush_metadata(struct dmz_metadata *zmd)
736 */ 755 */
737 dmz_lock_flush(zmd); 756 dmz_lock_flush(zmd);
738 757
758 if (dmz_bdev_is_dying(zmd->dev)) {
759 ret = -EIO;
760 goto out;
761 }
762
739 /* Get dirty blocks */ 763 /* Get dirty blocks */
740 spin_lock(&zmd->mblk_lock); 764 spin_lock(&zmd->mblk_lock);
741 list_splice_init(&zmd->mblk_dirty_list, &write_list); 765 list_splice_init(&zmd->mblk_dirty_list, &write_list);
@@ -1542,7 +1566,7 @@ static struct dm_zone *dmz_get_rnd_zone_for_reclaim(struct dmz_metadata *zmd)
1542 struct dm_zone *zone; 1566 struct dm_zone *zone;
1543 1567
1544 if (list_empty(&zmd->map_rnd_list)) 1568 if (list_empty(&zmd->map_rnd_list))
1545 return NULL; 1569 return ERR_PTR(-EBUSY);
1546 1570
1547 list_for_each_entry(zone, &zmd->map_rnd_list, link) { 1571 list_for_each_entry(zone, &zmd->map_rnd_list, link) {
1548 if (dmz_is_buf(zone)) 1572 if (dmz_is_buf(zone))
@@ -1553,7 +1577,7 @@ static struct dm_zone *dmz_get_rnd_zone_for_reclaim(struct dmz_metadata *zmd)
1553 return dzone; 1577 return dzone;
1554 } 1578 }
1555 1579
1556 return NULL; 1580 return ERR_PTR(-EBUSY);
1557} 1581}
1558 1582
1559/* 1583/*
@@ -1564,7 +1588,7 @@ static struct dm_zone *dmz_get_seq_zone_for_reclaim(struct dmz_metadata *zmd)
1564 struct dm_zone *zone; 1588 struct dm_zone *zone;
1565 1589
1566 if (list_empty(&zmd->map_seq_list)) 1590 if (list_empty(&zmd->map_seq_list))
1567 return NULL; 1591 return ERR_PTR(-EBUSY);
1568 1592
1569 list_for_each_entry(zone, &zmd->map_seq_list, link) { 1593 list_for_each_entry(zone, &zmd->map_seq_list, link) {
1570 if (!zone->bzone) 1594 if (!zone->bzone)
@@ -1573,7 +1597,7 @@ static struct dm_zone *dmz_get_seq_zone_for_reclaim(struct dmz_metadata *zmd)
1573 return zone; 1597 return zone;
1574 } 1598 }
1575 1599
1576 return NULL; 1600 return ERR_PTR(-EBUSY);
1577} 1601}
1578 1602
1579/* 1603/*
@@ -1628,9 +1652,13 @@ again:
1628 if (op != REQ_OP_WRITE) 1652 if (op != REQ_OP_WRITE)
1629 goto out; 1653 goto out;
1630 1654
1631 /* Alloate a random zone */ 1655 /* Allocate a random zone */
1632 dzone = dmz_alloc_zone(zmd, DMZ_ALLOC_RND); 1656 dzone = dmz_alloc_zone(zmd, DMZ_ALLOC_RND);
1633 if (!dzone) { 1657 if (!dzone) {
1658 if (dmz_bdev_is_dying(zmd->dev)) {
1659 dzone = ERR_PTR(-EIO);
1660 goto out;
1661 }
1634 dmz_wait_for_free_zones(zmd); 1662 dmz_wait_for_free_zones(zmd);
1635 goto again; 1663 goto again;
1636 } 1664 }
@@ -1725,9 +1753,13 @@ again:
1725 if (bzone) 1753 if (bzone)
1726 goto out; 1754 goto out;
1727 1755
1728 /* Alloate a random zone */ 1756 /* Allocate a random zone */
1729 bzone = dmz_alloc_zone(zmd, DMZ_ALLOC_RND); 1757 bzone = dmz_alloc_zone(zmd, DMZ_ALLOC_RND);
1730 if (!bzone) { 1758 if (!bzone) {
1759 if (dmz_bdev_is_dying(zmd->dev)) {
1760 bzone = ERR_PTR(-EIO);
1761 goto out;
1762 }
1731 dmz_wait_for_free_zones(zmd); 1763 dmz_wait_for_free_zones(zmd);
1732 goto again; 1764 goto again;
1733 } 1765 }
diff --git a/drivers/md/dm-zoned-reclaim.c b/drivers/md/dm-zoned-reclaim.c
index edf4b95eb075..d240d7ca8a8a 100644
--- a/drivers/md/dm-zoned-reclaim.c
+++ b/drivers/md/dm-zoned-reclaim.c
@@ -1,3 +1,4 @@
1// SPDX-License-Identifier: GPL-2.0-only
1/* 2/*
2 * Copyright (C) 2017 Western Digital Corporation or its affiliates. 3 * Copyright (C) 2017 Western Digital Corporation or its affiliates.
3 * 4 *
@@ -37,7 +38,7 @@ enum {
37/* 38/*
38 * Number of seconds of target BIO inactivity to consider the target idle. 39 * Number of seconds of target BIO inactivity to consider the target idle.
39 */ 40 */
40#define DMZ_IDLE_PERIOD (10UL * HZ) 41#define DMZ_IDLE_PERIOD (10UL * HZ)
41 42
42/* 43/*
43 * Percentage of unmapped (free) random zones below which reclaim starts 44 * Percentage of unmapped (free) random zones below which reclaim starts
@@ -134,6 +135,9 @@ static int dmz_reclaim_copy(struct dmz_reclaim *zrc,
134 set_bit(DM_KCOPYD_WRITE_SEQ, &flags); 135 set_bit(DM_KCOPYD_WRITE_SEQ, &flags);
135 136
136 while (block < end_block) { 137 while (block < end_block) {
138 if (dev->flags & DMZ_BDEV_DYING)
139 return -EIO;
140
137 /* Get a valid region from the source zone */ 141 /* Get a valid region from the source zone */
138 ret = dmz_first_valid_block(zmd, src_zone, &block); 142 ret = dmz_first_valid_block(zmd, src_zone, &block);
139 if (ret <= 0) 143 if (ret <= 0)
@@ -215,7 +219,7 @@ static int dmz_reclaim_buf(struct dmz_reclaim *zrc, struct dm_zone *dzone)
215 219
216 dmz_unlock_flush(zmd); 220 dmz_unlock_flush(zmd);
217 221
218 return 0; 222 return ret;
219} 223}
220 224
221/* 225/*
@@ -259,7 +263,7 @@ static int dmz_reclaim_seq_data(struct dmz_reclaim *zrc, struct dm_zone *dzone)
259 263
260 dmz_unlock_flush(zmd); 264 dmz_unlock_flush(zmd);
261 265
262 return 0; 266 return ret;
263} 267}
264 268
265/* 269/*
@@ -312,7 +316,7 @@ static int dmz_reclaim_rnd_data(struct dmz_reclaim *zrc, struct dm_zone *dzone)
312 316
313 dmz_unlock_flush(zmd); 317 dmz_unlock_flush(zmd);
314 318
315 return 0; 319 return ret;
316} 320}
317 321
318/* 322/*
@@ -334,7 +338,7 @@ static void dmz_reclaim_empty(struct dmz_reclaim *zrc, struct dm_zone *dzone)
334/* 338/*
335 * Find a candidate zone for reclaim and process it. 339 * Find a candidate zone for reclaim and process it.
336 */ 340 */
337static void dmz_reclaim(struct dmz_reclaim *zrc) 341static int dmz_do_reclaim(struct dmz_reclaim *zrc)
338{ 342{
339 struct dmz_metadata *zmd = zrc->metadata; 343 struct dmz_metadata *zmd = zrc->metadata;
340 struct dm_zone *dzone; 344 struct dm_zone *dzone;
@@ -344,8 +348,8 @@ static void dmz_reclaim(struct dmz_reclaim *zrc)
344 348
345 /* Get a data zone */ 349 /* Get a data zone */
346 dzone = dmz_get_zone_for_reclaim(zmd); 350 dzone = dmz_get_zone_for_reclaim(zmd);
347 if (!dzone) 351 if (IS_ERR(dzone))
348 return; 352 return PTR_ERR(dzone);
349 353
350 start = jiffies; 354 start = jiffies;
351 355
@@ -391,13 +395,20 @@ static void dmz_reclaim(struct dmz_reclaim *zrc)
391out: 395out:
392 if (ret) { 396 if (ret) {
393 dmz_unlock_zone_reclaim(dzone); 397 dmz_unlock_zone_reclaim(dzone);
394 return; 398 return ret;
395 } 399 }
396 400
397 (void) dmz_flush_metadata(zrc->metadata); 401 ret = dmz_flush_metadata(zrc->metadata);
402 if (ret) {
403 dmz_dev_debug(zrc->dev,
404 "Metadata flush for zone %u failed, err %d\n",
405 dmz_id(zmd, rzone), ret);
406 return ret;
407 }
398 408
399 dmz_dev_debug(zrc->dev, "Reclaimed zone %u in %u ms", 409 dmz_dev_debug(zrc->dev, "Reclaimed zone %u in %u ms",
400 dmz_id(zmd, rzone), jiffies_to_msecs(jiffies - start)); 410 dmz_id(zmd, rzone), jiffies_to_msecs(jiffies - start));
411 return 0;
401} 412}
402 413
403/* 414/*
@@ -427,7 +438,7 @@ static bool dmz_should_reclaim(struct dmz_reclaim *zrc)
427 return false; 438 return false;
428 439
429 /* 440 /*
430 * If the percentage of unmappped random zones is low, 441 * If the percentage of unmapped random zones is low,
431 * reclaim even if the target is busy. 442 * reclaim even if the target is busy.
432 */ 443 */
433 return p_unmap_rnd <= DMZ_RECLAIM_LOW_UNMAP_RND; 444 return p_unmap_rnd <= DMZ_RECLAIM_LOW_UNMAP_RND;
@@ -442,6 +453,10 @@ static void dmz_reclaim_work(struct work_struct *work)
442 struct dmz_metadata *zmd = zrc->metadata; 453 struct dmz_metadata *zmd = zrc->metadata;
443 unsigned int nr_rnd, nr_unmap_rnd; 454 unsigned int nr_rnd, nr_unmap_rnd;
444 unsigned int p_unmap_rnd; 455 unsigned int p_unmap_rnd;
456 int ret;
457
458 if (dmz_bdev_is_dying(zrc->dev))
459 return;
445 460
446 if (!dmz_should_reclaim(zrc)) { 461 if (!dmz_should_reclaim(zrc)) {
447 mod_delayed_work(zrc->wq, &zrc->work, DMZ_IDLE_PERIOD); 462 mod_delayed_work(zrc->wq, &zrc->work, DMZ_IDLE_PERIOD);
@@ -471,7 +486,17 @@ static void dmz_reclaim_work(struct work_struct *work)
471 (dmz_target_idle(zrc) ? "Idle" : "Busy"), 486 (dmz_target_idle(zrc) ? "Idle" : "Busy"),
472 p_unmap_rnd, nr_unmap_rnd, nr_rnd); 487 p_unmap_rnd, nr_unmap_rnd, nr_rnd);
473 488
474 dmz_reclaim(zrc); 489 ret = dmz_do_reclaim(zrc);
490 if (ret) {
491 dmz_dev_debug(zrc->dev, "Reclaim error %d\n", ret);
492 if (ret == -EIO)
493 /*
494 * LLD might be performing some error handling sequence
495 * at the underlying device. To not interfere, do not
496 * attempt to schedule the next reclaim run immediately.
497 */
498 return;
499 }
475 500
476 dmz_schedule_reclaim(zrc); 501 dmz_schedule_reclaim(zrc);
477} 502}
diff --git a/drivers/md/dm-zoned-target.c b/drivers/md/dm-zoned-target.c
index 51d029bbb740..31478fef6032 100644
--- a/drivers/md/dm-zoned-target.c
+++ b/drivers/md/dm-zoned-target.c
@@ -1,3 +1,4 @@
1// SPDX-License-Identifier: GPL-2.0-only
1/* 2/*
2 * Copyright (C) 2017 Western Digital Corporation or its affiliates. 3 * Copyright (C) 2017 Western Digital Corporation or its affiliates.
3 * 4 *
@@ -133,6 +134,8 @@ static int dmz_submit_bio(struct dmz_target *dmz, struct dm_zone *zone,
133 134
134 refcount_inc(&bioctx->ref); 135 refcount_inc(&bioctx->ref);
135 generic_make_request(clone); 136 generic_make_request(clone);
137 if (clone->bi_status == BLK_STS_IOERR)
138 return -EIO;
136 139
137 if (bio_op(bio) == REQ_OP_WRITE && dmz_is_seq(zone)) 140 if (bio_op(bio) == REQ_OP_WRITE && dmz_is_seq(zone))
138 zone->wp_block += nr_blocks; 141 zone->wp_block += nr_blocks;
@@ -277,8 +280,8 @@ static int dmz_handle_buffered_write(struct dmz_target *dmz,
277 280
278 /* Get the buffer zone. One will be allocated if needed */ 281 /* Get the buffer zone. One will be allocated if needed */
279 bzone = dmz_get_chunk_buffer(zmd, zone); 282 bzone = dmz_get_chunk_buffer(zmd, zone);
280 if (!bzone) 283 if (IS_ERR(bzone))
281 return -ENOSPC; 284 return PTR_ERR(bzone);
282 285
283 if (dmz_is_readonly(bzone)) 286 if (dmz_is_readonly(bzone))
284 return -EROFS; 287 return -EROFS;
@@ -389,6 +392,11 @@ static void dmz_handle_bio(struct dmz_target *dmz, struct dm_chunk_work *cw,
389 392
390 dmz_lock_metadata(zmd); 393 dmz_lock_metadata(zmd);
391 394
395 if (dmz->dev->flags & DMZ_BDEV_DYING) {
396 ret = -EIO;
397 goto out;
398 }
399
392 /* 400 /*
393 * Get the data zone mapping the chunk. There may be no 401 * Get the data zone mapping the chunk. There may be no
394 * mapping for read and discard. If a mapping is obtained, 402 * mapping for read and discard. If a mapping is obtained,
@@ -493,6 +501,8 @@ static void dmz_flush_work(struct work_struct *work)
493 501
494 /* Flush dirty metadata blocks */ 502 /* Flush dirty metadata blocks */
495 ret = dmz_flush_metadata(dmz->metadata); 503 ret = dmz_flush_metadata(dmz->metadata);
504 if (ret)
505 dmz_dev_debug(dmz->dev, "Metadata flush failed, rc=%d\n", ret);
496 506
497 /* Process queued flush requests */ 507 /* Process queued flush requests */
498 while (1) { 508 while (1) {
@@ -513,22 +523,24 @@ static void dmz_flush_work(struct work_struct *work)
513 * Get a chunk work and start it to process a new BIO. 523 * Get a chunk work and start it to process a new BIO.
514 * If the BIO chunk has no work yet, create one. 524 * If the BIO chunk has no work yet, create one.
515 */ 525 */
516static void dmz_queue_chunk_work(struct dmz_target *dmz, struct bio *bio) 526static int dmz_queue_chunk_work(struct dmz_target *dmz, struct bio *bio)
517{ 527{
518 unsigned int chunk = dmz_bio_chunk(dmz->dev, bio); 528 unsigned int chunk = dmz_bio_chunk(dmz->dev, bio);
519 struct dm_chunk_work *cw; 529 struct dm_chunk_work *cw;
530 int ret = 0;
520 531
521 mutex_lock(&dmz->chunk_lock); 532 mutex_lock(&dmz->chunk_lock);
522 533
523 /* Get the BIO chunk work. If one is not active yet, create one */ 534 /* Get the BIO chunk work. If one is not active yet, create one */
524 cw = radix_tree_lookup(&dmz->chunk_rxtree, chunk); 535 cw = radix_tree_lookup(&dmz->chunk_rxtree, chunk);
525 if (!cw) { 536 if (!cw) {
526 int ret;
527 537
528 /* Create a new chunk work */ 538 /* Create a new chunk work */
529 cw = kmalloc(sizeof(struct dm_chunk_work), GFP_NOIO); 539 cw = kmalloc(sizeof(struct dm_chunk_work), GFP_NOIO);
530 if (!cw) 540 if (unlikely(!cw)) {
541 ret = -ENOMEM;
531 goto out; 542 goto out;
543 }
532 544
533 INIT_WORK(&cw->work, dmz_chunk_work); 545 INIT_WORK(&cw->work, dmz_chunk_work);
534 refcount_set(&cw->refcount, 0); 546 refcount_set(&cw->refcount, 0);
@@ -539,7 +551,6 @@ static void dmz_queue_chunk_work(struct dmz_target *dmz, struct bio *bio)
539 ret = radix_tree_insert(&dmz->chunk_rxtree, chunk, cw); 551 ret = radix_tree_insert(&dmz->chunk_rxtree, chunk, cw);
540 if (unlikely(ret)) { 552 if (unlikely(ret)) {
541 kfree(cw); 553 kfree(cw);
542 cw = NULL;
543 goto out; 554 goto out;
544 } 555 }
545 } 556 }
@@ -547,10 +558,38 @@ static void dmz_queue_chunk_work(struct dmz_target *dmz, struct bio *bio)
547 bio_list_add(&cw->bio_list, bio); 558 bio_list_add(&cw->bio_list, bio);
548 dmz_get_chunk_work(cw); 559 dmz_get_chunk_work(cw);
549 560
561 dmz_reclaim_bio_acc(dmz->reclaim);
550 if (queue_work(dmz->chunk_wq, &cw->work)) 562 if (queue_work(dmz->chunk_wq, &cw->work))
551 dmz_get_chunk_work(cw); 563 dmz_get_chunk_work(cw);
552out: 564out:
553 mutex_unlock(&dmz->chunk_lock); 565 mutex_unlock(&dmz->chunk_lock);
566 return ret;
567}
568
569/*
570 * Check the backing device availability. If it's on the way out,
571 * start failing I/O. Reclaim and metadata components also call this
572 * function to cleanly abort operation in the event of such failure.
573 */
574bool dmz_bdev_is_dying(struct dmz_dev *dmz_dev)
575{
576 struct gendisk *disk;
577
578 if (!(dmz_dev->flags & DMZ_BDEV_DYING)) {
579 disk = dmz_dev->bdev->bd_disk;
580 if (blk_queue_dying(bdev_get_queue(dmz_dev->bdev))) {
581 dmz_dev_warn(dmz_dev, "Backing device queue dying");
582 dmz_dev->flags |= DMZ_BDEV_DYING;
583 } else if (disk->fops->check_events) {
584 if (disk->fops->check_events(disk, 0) &
585 DISK_EVENT_MEDIA_CHANGE) {
586 dmz_dev_warn(dmz_dev, "Backing device offline");
587 dmz_dev->flags |= DMZ_BDEV_DYING;
588 }
589 }
590 }
591
592 return dmz_dev->flags & DMZ_BDEV_DYING;
554} 593}
555 594
556/* 595/*
@@ -564,6 +603,10 @@ static int dmz_map(struct dm_target *ti, struct bio *bio)
564 sector_t sector = bio->bi_iter.bi_sector; 603 sector_t sector = bio->bi_iter.bi_sector;
565 unsigned int nr_sectors = bio_sectors(bio); 604 unsigned int nr_sectors = bio_sectors(bio);
566 sector_t chunk_sector; 605 sector_t chunk_sector;
606 int ret;
607
608 if (dmz_bdev_is_dying(dmz->dev))
609 return DM_MAPIO_KILL;
567 610
568 dmz_dev_debug(dev, "BIO op %d sector %llu + %u => chunk %llu, block %llu, %u blocks", 611 dmz_dev_debug(dev, "BIO op %d sector %llu + %u => chunk %llu, block %llu, %u blocks",
569 bio_op(bio), (unsigned long long)sector, nr_sectors, 612 bio_op(bio), (unsigned long long)sector, nr_sectors,
@@ -601,8 +644,14 @@ static int dmz_map(struct dm_target *ti, struct bio *bio)
601 dm_accept_partial_bio(bio, dev->zone_nr_sectors - chunk_sector); 644 dm_accept_partial_bio(bio, dev->zone_nr_sectors - chunk_sector);
602 645
603 /* Now ready to handle this BIO */ 646 /* Now ready to handle this BIO */
604 dmz_reclaim_bio_acc(dmz->reclaim); 647 ret = dmz_queue_chunk_work(dmz, bio);
605 dmz_queue_chunk_work(dmz, bio); 648 if (ret) {
649 dmz_dev_debug(dmz->dev,
650 "BIO op %d, can't process chunk %llu, err %i\n",
651 bio_op(bio), (u64)dmz_bio_chunk(dmz->dev, bio),
652 ret);
653 return DM_MAPIO_REQUEUE;
654 }
606 655
607 return DM_MAPIO_SUBMITTED; 656 return DM_MAPIO_SUBMITTED;
608} 657}
@@ -855,6 +904,9 @@ static int dmz_prepare_ioctl(struct dm_target *ti, struct block_device **bdev)
855{ 904{
856 struct dmz_target *dmz = ti->private; 905 struct dmz_target *dmz = ti->private;
857 906
907 if (dmz_bdev_is_dying(dmz->dev))
908 return -ENODEV;
909
858 *bdev = dmz->dev->bdev; 910 *bdev = dmz->dev->bdev;
859 911
860 return 0; 912 return 0;
diff --git a/drivers/md/dm-zoned.h b/drivers/md/dm-zoned.h
index ed8de49c9a08..d8e70b0ade35 100644
--- a/drivers/md/dm-zoned.h
+++ b/drivers/md/dm-zoned.h
@@ -1,3 +1,4 @@
1/* SPDX-License-Identifier: GPL-2.0 */
1/* 2/*
2 * Copyright (C) 2017 Western Digital Corporation or its affiliates. 3 * Copyright (C) 2017 Western Digital Corporation or its affiliates.
3 * 4 *
@@ -56,6 +57,8 @@ struct dmz_dev {
56 57
57 unsigned int nr_zones; 58 unsigned int nr_zones;
58 59
60 unsigned int flags;
61
59 sector_t zone_nr_sectors; 62 sector_t zone_nr_sectors;
60 unsigned int zone_nr_sectors_shift; 63 unsigned int zone_nr_sectors_shift;
61 64
@@ -67,6 +70,9 @@ struct dmz_dev {
67 (dev)->zone_nr_sectors_shift) 70 (dev)->zone_nr_sectors_shift)
68#define dmz_chunk_block(dev, b) ((b) & ((dev)->zone_nr_blocks - 1)) 71#define dmz_chunk_block(dev, b) ((b) & ((dev)->zone_nr_blocks - 1))
69 72
73/* Device flags. */
74#define DMZ_BDEV_DYING (1 << 0)
75
70/* 76/*
71 * Zone descriptor. 77 * Zone descriptor.
72 */ 78 */
@@ -245,4 +251,9 @@ void dmz_resume_reclaim(struct dmz_reclaim *zrc);
245void dmz_reclaim_bio_acc(struct dmz_reclaim *zrc); 251void dmz_reclaim_bio_acc(struct dmz_reclaim *zrc);
246void dmz_schedule_reclaim(struct dmz_reclaim *zrc); 252void dmz_schedule_reclaim(struct dmz_reclaim *zrc);
247 253
254/*
255 * Functions defined in dm-zoned-target.c
256 */
257bool dmz_bdev_is_dying(struct dmz_dev *dmz_dev);
258
248#endif /* DM_ZONED_H */ 259#endif /* DM_ZONED_H */
diff --git a/drivers/md/persistent-data/dm-btree.c b/drivers/md/persistent-data/dm-btree.c
index 58b319757b1e..8aae0624a297 100644
--- a/drivers/md/persistent-data/dm-btree.c
+++ b/drivers/md/persistent-data/dm-btree.c
@@ -628,39 +628,40 @@ static int btree_split_beneath(struct shadow_spine *s, uint64_t key)
628 628
629 new_parent = shadow_current(s); 629 new_parent = shadow_current(s);
630 630
631 pn = dm_block_data(new_parent);
632 size = le32_to_cpu(pn->header.flags) & INTERNAL_NODE ?
633 sizeof(__le64) : s->info->value_type.size;
634
635 /* create & init the left block */
631 r = new_block(s->info, &left); 636 r = new_block(s->info, &left);
632 if (r < 0) 637 if (r < 0)
633 return r; 638 return r;
634 639
640 ln = dm_block_data(left);
641 nr_left = le32_to_cpu(pn->header.nr_entries) / 2;
642
643 ln->header.flags = pn->header.flags;
644 ln->header.nr_entries = cpu_to_le32(nr_left);
645 ln->header.max_entries = pn->header.max_entries;
646 ln->header.value_size = pn->header.value_size;
647 memcpy(ln->keys, pn->keys, nr_left * sizeof(pn->keys[0]));
648 memcpy(value_ptr(ln, 0), value_ptr(pn, 0), nr_left * size);
649
650 /* create & init the right block */
635 r = new_block(s->info, &right); 651 r = new_block(s->info, &right);
636 if (r < 0) { 652 if (r < 0) {
637 unlock_block(s->info, left); 653 unlock_block(s->info, left);
638 return r; 654 return r;
639 } 655 }
640 656
641 pn = dm_block_data(new_parent);
642 ln = dm_block_data(left);
643 rn = dm_block_data(right); 657 rn = dm_block_data(right);
644
645 nr_left = le32_to_cpu(pn->header.nr_entries) / 2;
646 nr_right = le32_to_cpu(pn->header.nr_entries) - nr_left; 658 nr_right = le32_to_cpu(pn->header.nr_entries) - nr_left;
647 659
648 ln->header.flags = pn->header.flags;
649 ln->header.nr_entries = cpu_to_le32(nr_left);
650 ln->header.max_entries = pn->header.max_entries;
651 ln->header.value_size = pn->header.value_size;
652
653 rn->header.flags = pn->header.flags; 660 rn->header.flags = pn->header.flags;
654 rn->header.nr_entries = cpu_to_le32(nr_right); 661 rn->header.nr_entries = cpu_to_le32(nr_right);
655 rn->header.max_entries = pn->header.max_entries; 662 rn->header.max_entries = pn->header.max_entries;
656 rn->header.value_size = pn->header.value_size; 663 rn->header.value_size = pn->header.value_size;
657
658 memcpy(ln->keys, pn->keys, nr_left * sizeof(pn->keys[0]));
659 memcpy(rn->keys, pn->keys + nr_left, nr_right * sizeof(pn->keys[0])); 664 memcpy(rn->keys, pn->keys + nr_left, nr_right * sizeof(pn->keys[0]));
660
661 size = le32_to_cpu(pn->header.flags) & INTERNAL_NODE ?
662 sizeof(__le64) : s->info->value_type.size;
663 memcpy(value_ptr(ln, 0), value_ptr(pn, 0), nr_left * size);
664 memcpy(value_ptr(rn, 0), value_ptr(pn, nr_left), 665 memcpy(value_ptr(rn, 0), value_ptr(pn, nr_left),
665 nr_right * size); 666 nr_right * size);
666 667
diff --git a/drivers/md/persistent-data/dm-space-map-metadata.c b/drivers/md/persistent-data/dm-space-map-metadata.c
index aec449243966..25328582cc48 100644
--- a/drivers/md/persistent-data/dm-space-map-metadata.c
+++ b/drivers/md/persistent-data/dm-space-map-metadata.c
@@ -249,7 +249,7 @@ static int out(struct sm_metadata *smm)
249 } 249 }
250 250
251 if (smm->recursion_count == 1) 251 if (smm->recursion_count == 1)
252 apply_bops(smm); 252 r = apply_bops(smm);
253 253
254 smm->recursion_count--; 254 smm->recursion_count--;
255 255
diff --git a/drivers/media/platform/omap/omap_vout_vrfb.c b/drivers/media/platform/omap/omap_vout_vrfb.c
index 29e3f5da59c1..11ec048929e8 100644
--- a/drivers/media/platform/omap/omap_vout_vrfb.c
+++ b/drivers/media/platform/omap/omap_vout_vrfb.c
@@ -253,8 +253,7 @@ int omap_vout_prepare_vrfb(struct omap_vout_device *vout,
253 */ 253 */
254 254
255 pixsize = vout->bpp * vout->vrfb_bpp; 255 pixsize = vout->bpp * vout->vrfb_bpp;
256 dst_icg = ((MAX_PIXELS_PER_LINE * pixsize) - 256 dst_icg = MAX_PIXELS_PER_LINE * pixsize - vout->pix.width * vout->bpp;
257 (vout->pix.width * vout->bpp)) + 1;
258 257
259 xt->src_start = vout->buf_phy_addr[vb->i]; 258 xt->src_start = vout->buf_phy_addr[vb->i];
260 xt->dst_start = vout->vrfb_context[vb->i].paddr[0]; 259 xt->dst_start = vout->vrfb_context[vb->i].paddr[0];
diff --git a/drivers/media/platform/vivid/vivid-core.c b/drivers/media/platform/vivid/vivid-core.c
index bc2a176937a4..d535aac68ce1 100644
--- a/drivers/media/platform/vivid/vivid-core.c
+++ b/drivers/media/platform/vivid/vivid-core.c
@@ -1099,6 +1099,8 @@ static int vivid_create_instance(struct platform_device *pdev, int inst)
1099 1099
1100 /* start creating the vb2 queues */ 1100 /* start creating the vb2 queues */
1101 if (dev->has_vid_cap) { 1101 if (dev->has_vid_cap) {
1102 snprintf(dev->vid_cap_dev.name, sizeof(dev->vid_cap_dev.name),
1103 "vivid-%03d-vid-cap", inst);
1102 /* initialize vid_cap queue */ 1104 /* initialize vid_cap queue */
1103 q = &dev->vb_vid_cap_q; 1105 q = &dev->vb_vid_cap_q;
1104 q->type = dev->multiplanar ? V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE : 1106 q->type = dev->multiplanar ? V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE :
@@ -1122,6 +1124,8 @@ static int vivid_create_instance(struct platform_device *pdev, int inst)
1122 } 1124 }
1123 1125
1124 if (dev->has_vid_out) { 1126 if (dev->has_vid_out) {
1127 snprintf(dev->vid_out_dev.name, sizeof(dev->vid_out_dev.name),
1128 "vivid-%03d-vid-out", inst);
1125 /* initialize vid_out queue */ 1129 /* initialize vid_out queue */
1126 q = &dev->vb_vid_out_q; 1130 q = &dev->vb_vid_out_q;
1127 q->type = dev->multiplanar ? V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE : 1131 q->type = dev->multiplanar ? V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE :
@@ -1265,8 +1269,6 @@ static int vivid_create_instance(struct platform_device *pdev, int inst)
1265 /* finally start creating the device nodes */ 1269 /* finally start creating the device nodes */
1266 if (dev->has_vid_cap) { 1270 if (dev->has_vid_cap) {
1267 vfd = &dev->vid_cap_dev; 1271 vfd = &dev->vid_cap_dev;
1268 snprintf(vfd->name, sizeof(vfd->name),
1269 "vivid-%03d-vid-cap", inst);
1270 vfd->fops = &vivid_fops; 1272 vfd->fops = &vivid_fops;
1271 vfd->ioctl_ops = &vivid_ioctl_ops; 1273 vfd->ioctl_ops = &vivid_ioctl_ops;
1272 vfd->device_caps = dev->vid_cap_caps; 1274 vfd->device_caps = dev->vid_cap_caps;
@@ -1312,8 +1314,6 @@ static int vivid_create_instance(struct platform_device *pdev, int inst)
1312 1314
1313 if (dev->has_vid_out) { 1315 if (dev->has_vid_out) {
1314 vfd = &dev->vid_out_dev; 1316 vfd = &dev->vid_out_dev;
1315 snprintf(vfd->name, sizeof(vfd->name),
1316 "vivid-%03d-vid-out", inst);
1317 vfd->vfl_dir = VFL_DIR_TX; 1317 vfd->vfl_dir = VFL_DIR_TX;
1318 vfd->fops = &vivid_fops; 1318 vfd->fops = &vivid_fops;
1319 vfd->ioctl_ops = &vivid_ioctl_ops; 1319 vfd->ioctl_ops = &vivid_ioctl_ops;
diff --git a/drivers/media/v4l2-core/v4l2-subdev.c b/drivers/media/v4l2-core/v4l2-subdev.c
index 21fb90d66bfc..25c73c13cc7e 100644
--- a/drivers/media/v4l2-core/v4l2-subdev.c
+++ b/drivers/media/v4l2-core/v4l2-subdev.c
@@ -124,7 +124,7 @@ static inline int check_which(__u32 which)
124static inline int check_pad(struct v4l2_subdev *sd, __u32 pad) 124static inline int check_pad(struct v4l2_subdev *sd, __u32 pad)
125{ 125{
126#if defined(CONFIG_MEDIA_CONTROLLER) 126#if defined(CONFIG_MEDIA_CONTROLLER)
127 if (sd->entity.graph_obj.mdev) { 127 if (sd->entity.num_pads) {
128 if (pad >= sd->entity.num_pads) 128 if (pad >= sd->entity.num_pads)
129 return -EINVAL; 129 return -EINVAL;
130 return 0; 130 return 0;
diff --git a/drivers/mfd/db8500-prcmu.c b/drivers/mfd/db8500-prcmu.c
index 3f21e26b8d36..90e0f21bc49c 100644
--- a/drivers/mfd/db8500-prcmu.c
+++ b/drivers/mfd/db8500-prcmu.c
@@ -1590,8 +1590,10 @@ static unsigned long dsiclk_rate(u8 n)
1590 switch (divsel) { 1590 switch (divsel) {
1591 case PRCM_DSI_PLLOUT_SEL_PHI_4: 1591 case PRCM_DSI_PLLOUT_SEL_PHI_4:
1592 div *= 2; 1592 div *= 2;
1593 /* Fall through */
1593 case PRCM_DSI_PLLOUT_SEL_PHI_2: 1594 case PRCM_DSI_PLLOUT_SEL_PHI_2:
1594 div *= 2; 1595 div *= 2;
1596 /* Fall through */
1595 case PRCM_DSI_PLLOUT_SEL_PHI: 1597 case PRCM_DSI_PLLOUT_SEL_PHI:
1596 return pll_rate(PRCM_PLLDSI_FREQ, clock_rate(PRCMU_HDMICLK), 1598 return pll_rate(PRCM_PLLDSI_FREQ, clock_rate(PRCMU_HDMICLK),
1597 PLL_RAW) / div; 1599 PLL_RAW) / div;
diff --git a/drivers/mfd/omap-usb-host.c b/drivers/mfd/omap-usb-host.c
index 792b855a9104..4798d9f3f9d5 100644
--- a/drivers/mfd/omap-usb-host.c
+++ b/drivers/mfd/omap-usb-host.c
@@ -308,7 +308,7 @@ static int usbhs_runtime_resume(struct device *dev)
308 i, r); 308 i, r);
309 } 309 }
310 } 310 }
311 /* Fall through as HSIC mode needs utmi_clk */ 311 /* Fall through - as HSIC mode needs utmi_clk */
312 312
313 case OMAP_EHCI_PORT_MODE_TLL: 313 case OMAP_EHCI_PORT_MODE_TLL:
314 if (!IS_ERR(omap->utmi_clk[i])) { 314 if (!IS_ERR(omap->utmi_clk[i])) {
@@ -344,7 +344,7 @@ static int usbhs_runtime_suspend(struct device *dev)
344 344
345 if (!IS_ERR(omap->hsic480m_clk[i])) 345 if (!IS_ERR(omap->hsic480m_clk[i]))
346 clk_disable_unprepare(omap->hsic480m_clk[i]); 346 clk_disable_unprepare(omap->hsic480m_clk[i]);
347 /* Fall through as utmi_clks were used in HSIC mode */ 347 /* Fall through - as utmi_clks were used in HSIC mode */
348 348
349 case OMAP_EHCI_PORT_MODE_TLL: 349 case OMAP_EHCI_PORT_MODE_TLL:
350 if (!IS_ERR(omap->utmi_clk[i])) 350 if (!IS_ERR(omap->utmi_clk[i]))
diff --git a/drivers/mfd/rk808.c b/drivers/mfd/rk808.c
index 601cefb5c9d8..050478cabc95 100644
--- a/drivers/mfd/rk808.c
+++ b/drivers/mfd/rk808.c
@@ -729,7 +729,7 @@ static int rk808_remove(struct i2c_client *client)
729 return 0; 729 return 0;
730} 730}
731 731
732static int rk8xx_suspend(struct device *dev) 732static int __maybe_unused rk8xx_suspend(struct device *dev)
733{ 733{
734 struct rk808 *rk808 = i2c_get_clientdata(rk808_i2c_client); 734 struct rk808 *rk808 = i2c_get_clientdata(rk808_i2c_client);
735 int ret = 0; 735 int ret = 0;
@@ -749,7 +749,7 @@ static int rk8xx_suspend(struct device *dev)
749 return ret; 749 return ret;
750} 750}
751 751
752static int rk8xx_resume(struct device *dev) 752static int __maybe_unused rk8xx_resume(struct device *dev)
753{ 753{
754 struct rk808 *rk808 = i2c_get_clientdata(rk808_i2c_client); 754 struct rk808 *rk808 = i2c_get_clientdata(rk808_i2c_client);
755 int ret = 0; 755 int ret = 0;
@@ -768,7 +768,7 @@ static int rk8xx_resume(struct device *dev)
768 768
769 return ret; 769 return ret;
770} 770}
771SIMPLE_DEV_PM_OPS(rk8xx_pm_ops, rk8xx_suspend, rk8xx_resume); 771static SIMPLE_DEV_PM_OPS(rk8xx_pm_ops, rk8xx_suspend, rk8xx_resume);
772 772
773static struct i2c_driver rk808_i2c_driver = { 773static struct i2c_driver rk808_i2c_driver = {
774 .driver = { 774 .driver = {
diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig
index 6abfc8e92fcc..16900357afc2 100644
--- a/drivers/misc/Kconfig
+++ b/drivers/misc/Kconfig
@@ -465,6 +465,7 @@ config PCI_ENDPOINT_TEST
465 465
466config XILINX_SDFEC 466config XILINX_SDFEC
467 tristate "Xilinx SDFEC 16" 467 tristate "Xilinx SDFEC 16"
468 depends on HAS_IOMEM
468 help 469 help
469 This option enables support for the Xilinx SDFEC (Soft Decision 470 This option enables support for the Xilinx SDFEC (Soft Decision
470 Forward Error Correction) driver. This enables a char driver 471 Forward Error Correction) driver. This enables a char driver
diff --git a/drivers/misc/eeprom/Kconfig b/drivers/misc/eeprom/Kconfig
index f88094719552..f2abe27010ef 100644
--- a/drivers/misc/eeprom/Kconfig
+++ b/drivers/misc/eeprom/Kconfig
@@ -5,6 +5,7 @@ config EEPROM_AT24
5 tristate "I2C EEPROMs / RAMs / ROMs from most vendors" 5 tristate "I2C EEPROMs / RAMs / ROMs from most vendors"
6 depends on I2C && SYSFS 6 depends on I2C && SYSFS
7 select NVMEM 7 select NVMEM
8 select NVMEM_SYSFS
8 select REGMAP_I2C 9 select REGMAP_I2C
9 help 10 help
10 Enable this driver to get read/write support to most I2C EEPROMs 11 Enable this driver to get read/write support to most I2C EEPROMs
@@ -34,6 +35,7 @@ config EEPROM_AT25
34 tristate "SPI EEPROMs from most vendors" 35 tristate "SPI EEPROMs from most vendors"
35 depends on SPI && SYSFS 36 depends on SPI && SYSFS
36 select NVMEM 37 select NVMEM
38 select NVMEM_SYSFS
37 help 39 help
38 Enable this driver to get read/write support to most SPI EEPROMs, 40 Enable this driver to get read/write support to most SPI EEPROMs,
39 after you configure the board init code to know about each eeprom 41 after you configure the board init code to know about each eeprom
@@ -80,6 +82,7 @@ config EEPROM_93XX46
80 depends on SPI && SYSFS 82 depends on SPI && SYSFS
81 select REGMAP 83 select REGMAP
82 select NVMEM 84 select NVMEM
85 select NVMEM_SYSFS
83 help 86 help
84 Driver for the microwire EEPROM chipsets 93xx46x. The driver 87 Driver for the microwire EEPROM chipsets 93xx46x. The driver
85 supports both read and write commands and also the command to 88 supports both read and write commands and also the command to
diff --git a/drivers/misc/eeprom/at24.c b/drivers/misc/eeprom/at24.c
index 35bf2477693d..518945b2f737 100644
--- a/drivers/misc/eeprom/at24.c
+++ b/drivers/misc/eeprom/at24.c
@@ -685,7 +685,7 @@ static int at24_probe(struct i2c_client *client)
685 nvmem_config.name = dev_name(dev); 685 nvmem_config.name = dev_name(dev);
686 nvmem_config.dev = dev; 686 nvmem_config.dev = dev;
687 nvmem_config.read_only = !writable; 687 nvmem_config.read_only = !writable;
688 nvmem_config.root_only = true; 688 nvmem_config.root_only = !(flags & AT24_FLAG_IRUGO);
689 nvmem_config.owner = THIS_MODULE; 689 nvmem_config.owner = THIS_MODULE;
690 nvmem_config.compat = true; 690 nvmem_config.compat = true;
691 nvmem_config.base_dev = dev; 691 nvmem_config.base_dev = dev;
diff --git a/drivers/misc/habanalabs/command_submission.c b/drivers/misc/habanalabs/command_submission.c
index 6ad83d5ef4b0..f00d1c32f6d6 100644
--- a/drivers/misc/habanalabs/command_submission.c
+++ b/drivers/misc/habanalabs/command_submission.c
@@ -683,7 +683,7 @@ int hl_cs_ioctl(struct hl_fpriv *hpriv, void *data)
683 683
684 rc = hl_poll_timeout_memory(hdev, 684 rc = hl_poll_timeout_memory(hdev,
685 &ctx->thread_ctx_switch_wait_token, tmp, (tmp == 1), 685 &ctx->thread_ctx_switch_wait_token, tmp, (tmp == 1),
686 100, jiffies_to_usecs(hdev->timeout_jiffies)); 686 100, jiffies_to_usecs(hdev->timeout_jiffies), false);
687 687
688 if (rc == -ETIMEDOUT) { 688 if (rc == -ETIMEDOUT) {
689 dev_err(hdev->dev, 689 dev_err(hdev->dev,
diff --git a/drivers/misc/habanalabs/device.c b/drivers/misc/habanalabs/device.c
index 0c4894dd9c02..7a8f9d0b71b5 100644
--- a/drivers/misc/habanalabs/device.c
+++ b/drivers/misc/habanalabs/device.c
@@ -970,7 +970,8 @@ int hl_device_init(struct hl_device *hdev, struct class *hclass)
970 rc = hl_ctx_init(hdev, hdev->kernel_ctx, true); 970 rc = hl_ctx_init(hdev, hdev->kernel_ctx, true);
971 if (rc) { 971 if (rc) {
972 dev_err(hdev->dev, "failed to initialize kernel context\n"); 972 dev_err(hdev->dev, "failed to initialize kernel context\n");
973 goto free_ctx; 973 kfree(hdev->kernel_ctx);
974 goto mmu_fini;
974 } 975 }
975 976
976 rc = hl_cb_pool_init(hdev); 977 rc = hl_cb_pool_init(hdev);
@@ -1053,8 +1054,6 @@ release_ctx:
1053 if (hl_ctx_put(hdev->kernel_ctx) != 1) 1054 if (hl_ctx_put(hdev->kernel_ctx) != 1)
1054 dev_err(hdev->dev, 1055 dev_err(hdev->dev,
1055 "kernel ctx is still alive on initialization failure\n"); 1056 "kernel ctx is still alive on initialization failure\n");
1056free_ctx:
1057 kfree(hdev->kernel_ctx);
1058mmu_fini: 1057mmu_fini:
1059 hl_mmu_fini(hdev); 1058 hl_mmu_fini(hdev);
1060eq_fini: 1059eq_fini:
diff --git a/drivers/misc/habanalabs/firmware_if.c b/drivers/misc/habanalabs/firmware_if.c
index cc8168bacb24..ea2ca67fbfbf 100644
--- a/drivers/misc/habanalabs/firmware_if.c
+++ b/drivers/misc/habanalabs/firmware_if.c
@@ -24,7 +24,7 @@ int hl_fw_push_fw_to_device(struct hl_device *hdev, const char *fw_name,
24{ 24{
25 const struct firmware *fw; 25 const struct firmware *fw;
26 const u64 *fw_data; 26 const u64 *fw_data;
27 size_t fw_size, i; 27 size_t fw_size;
28 int rc; 28 int rc;
29 29
30 rc = request_firmware(&fw, fw_name, hdev->dev); 30 rc = request_firmware(&fw, fw_name, hdev->dev);
@@ -45,22 +45,7 @@ int hl_fw_push_fw_to_device(struct hl_device *hdev, const char *fw_name,
45 45
46 fw_data = (const u64 *) fw->data; 46 fw_data = (const u64 *) fw->data;
47 47
48 if ((fw->size % 8) != 0) 48 memcpy_toio(dst, fw_data, fw_size);
49 fw_size -= 8;
50
51 for (i = 0 ; i < fw_size ; i += 8, fw_data++, dst += 8) {
52 if (!(i & (0x80000 - 1))) {
53 dev_dbg(hdev->dev,
54 "copied so far %zu out of %zu for %s firmware",
55 i, fw_size, fw_name);
56 usleep_range(20, 100);
57 }
58
59 writeq(*fw_data, dst);
60 }
61
62 if ((fw->size % 8) != 0)
63 writel(*(const u32 *) fw_data, dst);
64 49
65out: 50out:
66 release_firmware(fw); 51 release_firmware(fw);
@@ -112,7 +97,8 @@ int hl_fw_send_cpu_message(struct hl_device *hdev, u32 hw_queue_id, u32 *msg,
112 } 97 }
113 98
114 rc = hl_poll_timeout_memory(hdev, &pkt->fence, tmp, 99 rc = hl_poll_timeout_memory(hdev, &pkt->fence, tmp,
115 (tmp == ARMCP_PACKET_FENCE_VAL), 1000, timeout); 100 (tmp == ARMCP_PACKET_FENCE_VAL), 1000,
101 timeout, true);
116 102
117 hl_hw_queue_inc_ci_kernel(hdev, hw_queue_id); 103 hl_hw_queue_inc_ci_kernel(hdev, hw_queue_id);
118 104
diff --git a/drivers/misc/habanalabs/goya/goya.c b/drivers/misc/habanalabs/goya/goya.c
index 75294ec65257..271c5c8f53b4 100644
--- a/drivers/misc/habanalabs/goya/goya.c
+++ b/drivers/misc/habanalabs/goya/goya.c
@@ -695,8 +695,8 @@ static int goya_sw_init(struct hl_device *hdev)
695 goto free_dma_pool; 695 goto free_dma_pool;
696 } 696 }
697 697
698 dev_dbg(hdev->dev, "cpu accessible memory at bus address 0x%llx\n", 698 dev_dbg(hdev->dev, "cpu accessible memory at bus address %pad\n",
699 hdev->cpu_accessible_dma_address); 699 &hdev->cpu_accessible_dma_address);
700 700
701 hdev->cpu_accessible_dma_pool = gen_pool_create(ilog2(32), -1); 701 hdev->cpu_accessible_dma_pool = gen_pool_create(ilog2(32), -1);
702 if (!hdev->cpu_accessible_dma_pool) { 702 if (!hdev->cpu_accessible_dma_pool) {
@@ -2729,9 +2729,10 @@ void goya_ring_doorbell(struct hl_device *hdev, u32 hw_queue_id, u32 pi)
2729 GOYA_ASYNC_EVENT_ID_PI_UPDATE); 2729 GOYA_ASYNC_EVENT_ID_PI_UPDATE);
2730} 2730}
2731 2731
2732void goya_flush_pq_write(struct hl_device *hdev, u64 *pq, u64 exp_val) 2732void goya_pqe_write(struct hl_device *hdev, __le64 *pqe, struct hl_bd *bd)
2733{ 2733{
2734 /* Not needed in Goya */ 2734 /* The QMANs are on the SRAM so need to copy to IO space */
2735 memcpy_toio((void __iomem *) pqe, bd, sizeof(struct hl_bd));
2735} 2736}
2736 2737
2737static void *goya_dma_alloc_coherent(struct hl_device *hdev, size_t size, 2738static void *goya_dma_alloc_coherent(struct hl_device *hdev, size_t size,
@@ -2864,7 +2865,8 @@ static int goya_send_job_on_qman0(struct hl_device *hdev, struct hl_cs_job *job)
2864 } 2865 }
2865 2866
2866 rc = hl_poll_timeout_memory(hdev, fence_ptr, tmp, 2867 rc = hl_poll_timeout_memory(hdev, fence_ptr, tmp,
2867 (tmp == GOYA_QMAN0_FENCE_VAL), 1000, timeout); 2868 (tmp == GOYA_QMAN0_FENCE_VAL), 1000,
2869 timeout, true);
2868 2870
2869 hl_hw_queue_inc_ci_kernel(hdev, GOYA_QUEUE_ID_DMA_0); 2871 hl_hw_queue_inc_ci_kernel(hdev, GOYA_QUEUE_ID_DMA_0);
2870 2872
@@ -2945,7 +2947,7 @@ int goya_test_queue(struct hl_device *hdev, u32 hw_queue_id)
2945 } 2947 }
2946 2948
2947 rc = hl_poll_timeout_memory(hdev, fence_ptr, tmp, (tmp == fence_val), 2949 rc = hl_poll_timeout_memory(hdev, fence_ptr, tmp, (tmp == fence_val),
2948 1000, GOYA_TEST_QUEUE_WAIT_USEC); 2950 1000, GOYA_TEST_QUEUE_WAIT_USEC, true);
2949 2951
2950 hl_hw_queue_inc_ci_kernel(hdev, hw_queue_id); 2952 hl_hw_queue_inc_ci_kernel(hdev, hw_queue_id);
2951 2953
@@ -3312,9 +3314,11 @@ static int goya_validate_dma_pkt_no_mmu(struct hl_device *hdev,
3312 int rc; 3314 int rc;
3313 3315
3314 dev_dbg(hdev->dev, "DMA packet details:\n"); 3316 dev_dbg(hdev->dev, "DMA packet details:\n");
3315 dev_dbg(hdev->dev, "source == 0x%llx\n", user_dma_pkt->src_addr); 3317 dev_dbg(hdev->dev, "source == 0x%llx\n",
3316 dev_dbg(hdev->dev, "destination == 0x%llx\n", user_dma_pkt->dst_addr); 3318 le64_to_cpu(user_dma_pkt->src_addr));
3317 dev_dbg(hdev->dev, "size == %u\n", user_dma_pkt->tsize); 3319 dev_dbg(hdev->dev, "destination == 0x%llx\n",
3320 le64_to_cpu(user_dma_pkt->dst_addr));
3321 dev_dbg(hdev->dev, "size == %u\n", le32_to_cpu(user_dma_pkt->tsize));
3318 3322
3319 ctl = le32_to_cpu(user_dma_pkt->ctl); 3323 ctl = le32_to_cpu(user_dma_pkt->ctl);
3320 user_dir = (ctl & GOYA_PKT_LIN_DMA_CTL_DMA_DIR_MASK) >> 3324 user_dir = (ctl & GOYA_PKT_LIN_DMA_CTL_DMA_DIR_MASK) >>
@@ -3343,9 +3347,11 @@ static int goya_validate_dma_pkt_mmu(struct hl_device *hdev,
3343 struct packet_lin_dma *user_dma_pkt) 3347 struct packet_lin_dma *user_dma_pkt)
3344{ 3348{
3345 dev_dbg(hdev->dev, "DMA packet details:\n"); 3349 dev_dbg(hdev->dev, "DMA packet details:\n");
3346 dev_dbg(hdev->dev, "source == 0x%llx\n", user_dma_pkt->src_addr); 3350 dev_dbg(hdev->dev, "source == 0x%llx\n",
3347 dev_dbg(hdev->dev, "destination == 0x%llx\n", user_dma_pkt->dst_addr); 3351 le64_to_cpu(user_dma_pkt->src_addr));
3348 dev_dbg(hdev->dev, "size == %u\n", user_dma_pkt->tsize); 3352 dev_dbg(hdev->dev, "destination == 0x%llx\n",
3353 le64_to_cpu(user_dma_pkt->dst_addr));
3354 dev_dbg(hdev->dev, "size == %u\n", le32_to_cpu(user_dma_pkt->tsize));
3349 3355
3350 /* 3356 /*
3351 * WA for HW-23. 3357 * WA for HW-23.
@@ -3385,7 +3391,8 @@ static int goya_validate_wreg32(struct hl_device *hdev,
3385 3391
3386 dev_dbg(hdev->dev, "WREG32 packet details:\n"); 3392 dev_dbg(hdev->dev, "WREG32 packet details:\n");
3387 dev_dbg(hdev->dev, "reg_offset == 0x%x\n", reg_offset); 3393 dev_dbg(hdev->dev, "reg_offset == 0x%x\n", reg_offset);
3388 dev_dbg(hdev->dev, "value == 0x%x\n", wreg_pkt->value); 3394 dev_dbg(hdev->dev, "value == 0x%x\n",
3395 le32_to_cpu(wreg_pkt->value));
3389 3396
3390 if (reg_offset != (mmDMA_CH_0_WR_COMP_ADDR_LO & 0x1FFF)) { 3397 if (reg_offset != (mmDMA_CH_0_WR_COMP_ADDR_LO & 0x1FFF)) {
3391 dev_err(hdev->dev, "WREG32 packet with illegal address 0x%x\n", 3398 dev_err(hdev->dev, "WREG32 packet with illegal address 0x%x\n",
@@ -3427,12 +3434,13 @@ static int goya_validate_cb(struct hl_device *hdev,
3427 while (cb_parsed_length < parser->user_cb_size) { 3434 while (cb_parsed_length < parser->user_cb_size) {
3428 enum packet_id pkt_id; 3435 enum packet_id pkt_id;
3429 u16 pkt_size; 3436 u16 pkt_size;
3430 void *user_pkt; 3437 struct goya_packet *user_pkt;
3431 3438
3432 user_pkt = (void *) (uintptr_t) 3439 user_pkt = (struct goya_packet *) (uintptr_t)
3433 (parser->user_cb->kernel_address + cb_parsed_length); 3440 (parser->user_cb->kernel_address + cb_parsed_length);
3434 3441
3435 pkt_id = (enum packet_id) (((*(u64 *) user_pkt) & 3442 pkt_id = (enum packet_id) (
3443 (le64_to_cpu(user_pkt->header) &
3436 PACKET_HEADER_PACKET_ID_MASK) >> 3444 PACKET_HEADER_PACKET_ID_MASK) >>
3437 PACKET_HEADER_PACKET_ID_SHIFT); 3445 PACKET_HEADER_PACKET_ID_SHIFT);
3438 3446
@@ -3452,7 +3460,8 @@ static int goya_validate_cb(struct hl_device *hdev,
3452 * need to validate here as well because patch_cb() is 3460 * need to validate here as well because patch_cb() is
3453 * not called in MMU path while this function is called 3461 * not called in MMU path while this function is called
3454 */ 3462 */
3455 rc = goya_validate_wreg32(hdev, parser, user_pkt); 3463 rc = goya_validate_wreg32(hdev,
3464 parser, (struct packet_wreg32 *) user_pkt);
3456 break; 3465 break;
3457 3466
3458 case PACKET_WREG_BULK: 3467 case PACKET_WREG_BULK:
@@ -3480,10 +3489,10 @@ static int goya_validate_cb(struct hl_device *hdev,
3480 case PACKET_LIN_DMA: 3489 case PACKET_LIN_DMA:
3481 if (is_mmu) 3490 if (is_mmu)
3482 rc = goya_validate_dma_pkt_mmu(hdev, parser, 3491 rc = goya_validate_dma_pkt_mmu(hdev, parser,
3483 user_pkt); 3492 (struct packet_lin_dma *) user_pkt);
3484 else 3493 else
3485 rc = goya_validate_dma_pkt_no_mmu(hdev, parser, 3494 rc = goya_validate_dma_pkt_no_mmu(hdev, parser,
3486 user_pkt); 3495 (struct packet_lin_dma *) user_pkt);
3487 break; 3496 break;
3488 3497
3489 case PACKET_MSG_LONG: 3498 case PACKET_MSG_LONG:
@@ -3656,15 +3665,16 @@ static int goya_patch_cb(struct hl_device *hdev,
3656 enum packet_id pkt_id; 3665 enum packet_id pkt_id;
3657 u16 pkt_size; 3666 u16 pkt_size;
3658 u32 new_pkt_size = 0; 3667 u32 new_pkt_size = 0;
3659 void *user_pkt, *kernel_pkt; 3668 struct goya_packet *user_pkt, *kernel_pkt;
3660 3669
3661 user_pkt = (void *) (uintptr_t) 3670 user_pkt = (struct goya_packet *) (uintptr_t)
3662 (parser->user_cb->kernel_address + cb_parsed_length); 3671 (parser->user_cb->kernel_address + cb_parsed_length);
3663 kernel_pkt = (void *) (uintptr_t) 3672 kernel_pkt = (struct goya_packet *) (uintptr_t)
3664 (parser->patched_cb->kernel_address + 3673 (parser->patched_cb->kernel_address +
3665 cb_patched_cur_length); 3674 cb_patched_cur_length);
3666 3675
3667 pkt_id = (enum packet_id) (((*(u64 *) user_pkt) & 3676 pkt_id = (enum packet_id) (
3677 (le64_to_cpu(user_pkt->header) &
3668 PACKET_HEADER_PACKET_ID_MASK) >> 3678 PACKET_HEADER_PACKET_ID_MASK) >>
3669 PACKET_HEADER_PACKET_ID_SHIFT); 3679 PACKET_HEADER_PACKET_ID_SHIFT);
3670 3680
@@ -3679,15 +3689,18 @@ static int goya_patch_cb(struct hl_device *hdev,
3679 3689
3680 switch (pkt_id) { 3690 switch (pkt_id) {
3681 case PACKET_LIN_DMA: 3691 case PACKET_LIN_DMA:
3682 rc = goya_patch_dma_packet(hdev, parser, user_pkt, 3692 rc = goya_patch_dma_packet(hdev, parser,
3683 kernel_pkt, &new_pkt_size); 3693 (struct packet_lin_dma *) user_pkt,
3694 (struct packet_lin_dma *) kernel_pkt,
3695 &new_pkt_size);
3684 cb_patched_cur_length += new_pkt_size; 3696 cb_patched_cur_length += new_pkt_size;
3685 break; 3697 break;
3686 3698
3687 case PACKET_WREG_32: 3699 case PACKET_WREG_32:
3688 memcpy(kernel_pkt, user_pkt, pkt_size); 3700 memcpy(kernel_pkt, user_pkt, pkt_size);
3689 cb_patched_cur_length += pkt_size; 3701 cb_patched_cur_length += pkt_size;
3690 rc = goya_validate_wreg32(hdev, parser, kernel_pkt); 3702 rc = goya_validate_wreg32(hdev, parser,
3703 (struct packet_wreg32 *) kernel_pkt);
3691 break; 3704 break;
3692 3705
3693 case PACKET_WREG_BULK: 3706 case PACKET_WREG_BULK:
@@ -4351,6 +4364,8 @@ static int goya_unmask_irq_arr(struct hl_device *hdev, u32 *irq_arr,
4351 size_t total_pkt_size; 4364 size_t total_pkt_size;
4352 long result; 4365 long result;
4353 int rc; 4366 int rc;
4367 int irq_num_entries, irq_arr_index;
4368 __le32 *goya_irq_arr;
4354 4369
4355 total_pkt_size = sizeof(struct armcp_unmask_irq_arr_packet) + 4370 total_pkt_size = sizeof(struct armcp_unmask_irq_arr_packet) +
4356 irq_arr_size; 4371 irq_arr_size;
@@ -4368,8 +4383,16 @@ static int goya_unmask_irq_arr(struct hl_device *hdev, u32 *irq_arr,
4368 if (!pkt) 4383 if (!pkt)
4369 return -ENOMEM; 4384 return -ENOMEM;
4370 4385
4371 pkt->length = cpu_to_le32(irq_arr_size / sizeof(irq_arr[0])); 4386 irq_num_entries = irq_arr_size / sizeof(irq_arr[0]);
4372 memcpy(&pkt->irqs, irq_arr, irq_arr_size); 4387 pkt->length = cpu_to_le32(irq_num_entries);
4388
4389 /* We must perform any necessary endianness conversation on the irq
4390 * array being passed to the goya hardware
4391 */
4392 for (irq_arr_index = 0, goya_irq_arr = (__le32 *) &pkt->irqs;
4393 irq_arr_index < irq_num_entries ; irq_arr_index++)
4394 goya_irq_arr[irq_arr_index] =
4395 cpu_to_le32(irq_arr[irq_arr_index]);
4373 4396
4374 pkt->armcp_pkt.ctl = cpu_to_le32(ARMCP_PACKET_UNMASK_RAZWI_IRQ_ARRAY << 4397 pkt->armcp_pkt.ctl = cpu_to_le32(ARMCP_PACKET_UNMASK_RAZWI_IRQ_ARRAY <<
4375 ARMCP_PKT_CTL_OPCODE_SHIFT); 4398 ARMCP_PKT_CTL_OPCODE_SHIFT);
@@ -4449,7 +4472,6 @@ void goya_handle_eqe(struct hl_device *hdev, struct hl_eq_entry *eq_entry)
4449 case GOYA_ASYNC_EVENT_ID_AXI_ECC: 4472 case GOYA_ASYNC_EVENT_ID_AXI_ECC:
4450 case GOYA_ASYNC_EVENT_ID_L2_RAM_ECC: 4473 case GOYA_ASYNC_EVENT_ID_L2_RAM_ECC:
4451 case GOYA_ASYNC_EVENT_ID_PSOC_GPIO_05_SW_RESET: 4474 case GOYA_ASYNC_EVENT_ID_PSOC_GPIO_05_SW_RESET:
4452 case GOYA_ASYNC_EVENT_ID_PSOC_GPIO_10_VRHOT_ICRIT:
4453 goya_print_irq_info(hdev, event_type, false); 4475 goya_print_irq_info(hdev, event_type, false);
4454 hl_device_reset(hdev, true, false); 4476 hl_device_reset(hdev, true, false);
4455 break; 4477 break;
@@ -4485,6 +4507,7 @@ void goya_handle_eqe(struct hl_device *hdev, struct hl_eq_entry *eq_entry)
4485 goya_unmask_irq(hdev, event_type); 4507 goya_unmask_irq(hdev, event_type);
4486 break; 4508 break;
4487 4509
4510 case GOYA_ASYNC_EVENT_ID_PSOC_GPIO_10_VRHOT_ICRIT:
4488 case GOYA_ASYNC_EVENT_ID_TPC0_BMON_SPMU: 4511 case GOYA_ASYNC_EVENT_ID_TPC0_BMON_SPMU:
4489 case GOYA_ASYNC_EVENT_ID_TPC1_BMON_SPMU: 4512 case GOYA_ASYNC_EVENT_ID_TPC1_BMON_SPMU:
4490 case GOYA_ASYNC_EVENT_ID_TPC2_BMON_SPMU: 4513 case GOYA_ASYNC_EVENT_ID_TPC2_BMON_SPMU:
@@ -5041,7 +5064,7 @@ static const struct hl_asic_funcs goya_funcs = {
5041 .resume = goya_resume, 5064 .resume = goya_resume,
5042 .cb_mmap = goya_cb_mmap, 5065 .cb_mmap = goya_cb_mmap,
5043 .ring_doorbell = goya_ring_doorbell, 5066 .ring_doorbell = goya_ring_doorbell,
5044 .flush_pq_write = goya_flush_pq_write, 5067 .pqe_write = goya_pqe_write,
5045 .asic_dma_alloc_coherent = goya_dma_alloc_coherent, 5068 .asic_dma_alloc_coherent = goya_dma_alloc_coherent,
5046 .asic_dma_free_coherent = goya_dma_free_coherent, 5069 .asic_dma_free_coherent = goya_dma_free_coherent,
5047 .get_int_queue_base = goya_get_int_queue_base, 5070 .get_int_queue_base = goya_get_int_queue_base,
diff --git a/drivers/misc/habanalabs/goya/goyaP.h b/drivers/misc/habanalabs/goya/goyaP.h
index f8c611883dc1..d7f48c9c41cd 100644
--- a/drivers/misc/habanalabs/goya/goyaP.h
+++ b/drivers/misc/habanalabs/goya/goyaP.h
@@ -177,7 +177,7 @@ int goya_late_init(struct hl_device *hdev);
177void goya_late_fini(struct hl_device *hdev); 177void goya_late_fini(struct hl_device *hdev);
178 178
179void goya_ring_doorbell(struct hl_device *hdev, u32 hw_queue_id, u32 pi); 179void goya_ring_doorbell(struct hl_device *hdev, u32 hw_queue_id, u32 pi);
180void goya_flush_pq_write(struct hl_device *hdev, u64 *pq, u64 exp_val); 180void goya_pqe_write(struct hl_device *hdev, __le64 *pqe, struct hl_bd *bd);
181void goya_update_eq_ci(struct hl_device *hdev, u32 val); 181void goya_update_eq_ci(struct hl_device *hdev, u32 val);
182void goya_restore_phase_topology(struct hl_device *hdev); 182void goya_restore_phase_topology(struct hl_device *hdev);
183int goya_context_switch(struct hl_device *hdev, u32 asid); 183int goya_context_switch(struct hl_device *hdev, u32 asid);
diff --git a/drivers/misc/habanalabs/habanalabs.h b/drivers/misc/habanalabs/habanalabs.h
index 10da9940ee0d..ce83adafcf2d 100644
--- a/drivers/misc/habanalabs/habanalabs.h
+++ b/drivers/misc/habanalabs/habanalabs.h
@@ -441,7 +441,11 @@ enum hl_pll_frequency {
441 * @resume: handles IP specific H/W or SW changes for resume. 441 * @resume: handles IP specific H/W or SW changes for resume.
442 * @cb_mmap: maps a CB. 442 * @cb_mmap: maps a CB.
443 * @ring_doorbell: increment PI on a given QMAN. 443 * @ring_doorbell: increment PI on a given QMAN.
444 * @flush_pq_write: flush PQ entry write if necessary, WARN if flushing failed. 444 * @pqe_write: Write the PQ entry to the PQ. This is ASIC-specific
445 * function because the PQs are located in different memory areas
446 * per ASIC (SRAM, DRAM, Host memory) and therefore, the method of
447 * writing the PQE must match the destination memory area
448 * properties.
445 * @asic_dma_alloc_coherent: Allocate coherent DMA memory by calling 449 * @asic_dma_alloc_coherent: Allocate coherent DMA memory by calling
446 * dma_alloc_coherent(). This is ASIC function because 450 * dma_alloc_coherent(). This is ASIC function because
447 * its implementation is not trivial when the driver 451 * its implementation is not trivial when the driver
@@ -510,7 +514,8 @@ struct hl_asic_funcs {
510 int (*cb_mmap)(struct hl_device *hdev, struct vm_area_struct *vma, 514 int (*cb_mmap)(struct hl_device *hdev, struct vm_area_struct *vma,
511 u64 kaddress, phys_addr_t paddress, u32 size); 515 u64 kaddress, phys_addr_t paddress, u32 size);
512 void (*ring_doorbell)(struct hl_device *hdev, u32 hw_queue_id, u32 pi); 516 void (*ring_doorbell)(struct hl_device *hdev, u32 hw_queue_id, u32 pi);
513 void (*flush_pq_write)(struct hl_device *hdev, u64 *pq, u64 exp_val); 517 void (*pqe_write)(struct hl_device *hdev, __le64 *pqe,
518 struct hl_bd *bd);
514 void* (*asic_dma_alloc_coherent)(struct hl_device *hdev, size_t size, 519 void* (*asic_dma_alloc_coherent)(struct hl_device *hdev, size_t size,
515 dma_addr_t *dma_handle, gfp_t flag); 520 dma_addr_t *dma_handle, gfp_t flag);
516 void (*asic_dma_free_coherent)(struct hl_device *hdev, size_t size, 521 void (*asic_dma_free_coherent)(struct hl_device *hdev, size_t size,
@@ -1062,9 +1067,17 @@ void hl_wreg(struct hl_device *hdev, u32 reg, u32 val);
1062/* 1067/*
1063 * address in this macro points always to a memory location in the 1068 * address in this macro points always to a memory location in the
1064 * host's (server's) memory. That location is updated asynchronously 1069 * host's (server's) memory. That location is updated asynchronously
1065 * either by the direct access of the device or by another core 1070 * either by the direct access of the device or by another core.
1071 *
1072 * To work both in LE and BE architectures, we need to distinguish between the
1073 * two states (device or another core updates the memory location). Therefore,
1074 * if mem_written_by_device is true, the host memory being polled will be
1075 * updated directly by the device. If false, the host memory being polled will
1076 * be updated by host CPU. Required so host knows whether or not the memory
1077 * might need to be byte-swapped before returning value to caller.
1066 */ 1078 */
1067#define hl_poll_timeout_memory(hdev, addr, val, cond, sleep_us, timeout_us) \ 1079#define hl_poll_timeout_memory(hdev, addr, val, cond, sleep_us, timeout_us, \
1080 mem_written_by_device) \
1068({ \ 1081({ \
1069 ktime_t __timeout; \ 1082 ktime_t __timeout; \
1070 /* timeout should be longer when working with simulator */ \ 1083 /* timeout should be longer when working with simulator */ \
@@ -1077,10 +1090,14 @@ void hl_wreg(struct hl_device *hdev, u32 reg, u32 val);
1077 /* Verify we read updates done by other cores or by device */ \ 1090 /* Verify we read updates done by other cores or by device */ \
1078 mb(); \ 1091 mb(); \
1079 (val) = *((u32 *) (uintptr_t) (addr)); \ 1092 (val) = *((u32 *) (uintptr_t) (addr)); \
1093 if (mem_written_by_device) \
1094 (val) = le32_to_cpu(val); \
1080 if (cond) \ 1095 if (cond) \
1081 break; \ 1096 break; \
1082 if (timeout_us && ktime_compare(ktime_get(), __timeout) > 0) { \ 1097 if (timeout_us && ktime_compare(ktime_get(), __timeout) > 0) { \
1083 (val) = *((u32 *) (uintptr_t) (addr)); \ 1098 (val) = *((u32 *) (uintptr_t) (addr)); \
1099 if (mem_written_by_device) \
1100 (val) = le32_to_cpu(val); \
1084 break; \ 1101 break; \
1085 } \ 1102 } \
1086 if (sleep_us) \ 1103 if (sleep_us) \
diff --git a/drivers/misc/habanalabs/hw_queue.c b/drivers/misc/habanalabs/hw_queue.c
index e3b5517897ea..5f5673b74985 100644
--- a/drivers/misc/habanalabs/hw_queue.c
+++ b/drivers/misc/habanalabs/hw_queue.c
@@ -290,23 +290,19 @@ static void int_hw_queue_schedule_job(struct hl_cs_job *job)
290 struct hl_device *hdev = job->cs->ctx->hdev; 290 struct hl_device *hdev = job->cs->ctx->hdev;
291 struct hl_hw_queue *q = &hdev->kernel_queues[job->hw_queue_id]; 291 struct hl_hw_queue *q = &hdev->kernel_queues[job->hw_queue_id];
292 struct hl_bd bd; 292 struct hl_bd bd;
293 u64 *pi, *pbd = (u64 *) &bd; 293 __le64 *pi;
294 294
295 bd.ctl = 0; 295 bd.ctl = 0;
296 bd.len = __cpu_to_le32(job->job_cb_size); 296 bd.len = cpu_to_le32(job->job_cb_size);
297 bd.ptr = __cpu_to_le64((u64) (uintptr_t) job->user_cb); 297 bd.ptr = cpu_to_le64((u64) (uintptr_t) job->user_cb);
298 298
299 pi = (u64 *) (uintptr_t) (q->kernel_address + 299 pi = (__le64 *) (uintptr_t) (q->kernel_address +
300 ((q->pi & (q->int_queue_len - 1)) * sizeof(bd))); 300 ((q->pi & (q->int_queue_len - 1)) * sizeof(bd)));
301 301
302 pi[0] = pbd[0];
303 pi[1] = pbd[1];
304
305 q->pi++; 302 q->pi++;
306 q->pi &= ((q->int_queue_len << 1) - 1); 303 q->pi &= ((q->int_queue_len << 1) - 1);
307 304
308 /* Flush PQ entry write. Relevant only for specific ASICs */ 305 hdev->asic_funcs->pqe_write(hdev, pi, &bd);
309 hdev->asic_funcs->flush_pq_write(hdev, pi, pbd[0]);
310 306
311 hdev->asic_funcs->ring_doorbell(hdev, q->hw_queue_id, q->pi); 307 hdev->asic_funcs->ring_doorbell(hdev, q->hw_queue_id, q->pi);
312} 308}
diff --git a/drivers/misc/habanalabs/include/goya/goya_packets.h b/drivers/misc/habanalabs/include/goya/goya_packets.h
index a14407b975e4..ef54bad20509 100644
--- a/drivers/misc/habanalabs/include/goya/goya_packets.h
+++ b/drivers/misc/habanalabs/include/goya/goya_packets.h
@@ -52,6 +52,19 @@ enum goya_dma_direction {
52#define GOYA_PKT_CTL_MB_SHIFT 31 52#define GOYA_PKT_CTL_MB_SHIFT 31
53#define GOYA_PKT_CTL_MB_MASK 0x80000000 53#define GOYA_PKT_CTL_MB_MASK 0x80000000
54 54
55/* All packets have, at least, an 8-byte header, which contains
56 * the packet type. The kernel driver uses the packet header for packet
57 * validation and to perform any necessary required preparation before
58 * sending them off to the hardware.
59 */
60struct goya_packet {
61 __le64 header;
62 /* The rest of the packet data follows. Use the corresponding
63 * packet_XXX struct to deference the data, based on packet type
64 */
65 u8 contents[0];
66};
67
55struct packet_nop { 68struct packet_nop {
56 __le32 reserved; 69 __le32 reserved;
57 __le32 ctl; 70 __le32 ctl;
diff --git a/drivers/misc/habanalabs/irq.c b/drivers/misc/habanalabs/irq.c
index ea9f72ff456c..199791b57caf 100644
--- a/drivers/misc/habanalabs/irq.c
+++ b/drivers/misc/habanalabs/irq.c
@@ -80,8 +80,7 @@ irqreturn_t hl_irq_handler_cq(int irq, void *arg)
80 struct hl_cs_job *job; 80 struct hl_cs_job *job;
81 bool shadow_index_valid; 81 bool shadow_index_valid;
82 u16 shadow_index; 82 u16 shadow_index;
83 u32 *cq_entry; 83 struct hl_cq_entry *cq_entry, *cq_base;
84 u32 *cq_base;
85 84
86 if (hdev->disabled) { 85 if (hdev->disabled) {
87 dev_dbg(hdev->dev, 86 dev_dbg(hdev->dev,
@@ -90,29 +89,29 @@ irqreturn_t hl_irq_handler_cq(int irq, void *arg)
90 return IRQ_HANDLED; 89 return IRQ_HANDLED;
91 } 90 }
92 91
93 cq_base = (u32 *) (uintptr_t) cq->kernel_address; 92 cq_base = (struct hl_cq_entry *) (uintptr_t) cq->kernel_address;
94 93
95 while (1) { 94 while (1) {
96 bool entry_ready = ((cq_base[cq->ci] & CQ_ENTRY_READY_MASK) 95 bool entry_ready = ((le32_to_cpu(cq_base[cq->ci].data) &
96 CQ_ENTRY_READY_MASK)
97 >> CQ_ENTRY_READY_SHIFT); 97 >> CQ_ENTRY_READY_SHIFT);
98 98
99 if (!entry_ready) 99 if (!entry_ready)
100 break; 100 break;
101 101
102 cq_entry = (u32 *) &cq_base[cq->ci]; 102 cq_entry = (struct hl_cq_entry *) &cq_base[cq->ci];
103 103
104 /* 104 /* Make sure we read CQ entry contents after we've
105 * Make sure we read CQ entry contents after we've
106 * checked the ownership bit. 105 * checked the ownership bit.
107 */ 106 */
108 dma_rmb(); 107 dma_rmb();
109 108
110 shadow_index_valid = 109 shadow_index_valid = ((le32_to_cpu(cq_entry->data) &
111 ((*cq_entry & CQ_ENTRY_SHADOW_INDEX_VALID_MASK) 110 CQ_ENTRY_SHADOW_INDEX_VALID_MASK)
112 >> CQ_ENTRY_SHADOW_INDEX_VALID_SHIFT); 111 >> CQ_ENTRY_SHADOW_INDEX_VALID_SHIFT);
113 112
114 shadow_index = (u16) 113 shadow_index = (u16) ((le32_to_cpu(cq_entry->data) &
115 ((*cq_entry & CQ_ENTRY_SHADOW_INDEX_MASK) 114 CQ_ENTRY_SHADOW_INDEX_MASK)
116 >> CQ_ENTRY_SHADOW_INDEX_SHIFT); 115 >> CQ_ENTRY_SHADOW_INDEX_SHIFT);
117 116
118 queue = &hdev->kernel_queues[cq->hw_queue_id]; 117 queue = &hdev->kernel_queues[cq->hw_queue_id];
@@ -122,8 +121,7 @@ irqreturn_t hl_irq_handler_cq(int irq, void *arg)
122 queue_work(hdev->cq_wq, &job->finish_work); 121 queue_work(hdev->cq_wq, &job->finish_work);
123 } 122 }
124 123
125 /* 124 /* Update ci of the context's queue. There is no
126 * Update ci of the context's queue. There is no
127 * need to protect it with spinlock because this update is 125 * need to protect it with spinlock because this update is
128 * done only inside IRQ and there is a different IRQ per 126 * done only inside IRQ and there is a different IRQ per
129 * queue 127 * queue
@@ -131,7 +129,8 @@ irqreturn_t hl_irq_handler_cq(int irq, void *arg)
131 queue->ci = hl_queue_inc_ptr(queue->ci); 129 queue->ci = hl_queue_inc_ptr(queue->ci);
132 130
133 /* Clear CQ entry ready bit */ 131 /* Clear CQ entry ready bit */
134 cq_base[cq->ci] &= ~CQ_ENTRY_READY_MASK; 132 cq_entry->data = cpu_to_le32(le32_to_cpu(cq_entry->data) &
133 ~CQ_ENTRY_READY_MASK);
135 134
136 cq->ci = hl_cq_inc_ptr(cq->ci); 135 cq->ci = hl_cq_inc_ptr(cq->ci);
137 136
diff --git a/drivers/misc/habanalabs/memory.c b/drivers/misc/habanalabs/memory.c
index 42d237cae1dc..365fb0cb8dff 100644
--- a/drivers/misc/habanalabs/memory.c
+++ b/drivers/misc/habanalabs/memory.c
@@ -1629,6 +1629,8 @@ void hl_vm_ctx_fini(struct hl_ctx *ctx)
1629 dev_dbg(hdev->dev, 1629 dev_dbg(hdev->dev,
1630 "page list 0x%p of asid %d is still alive\n", 1630 "page list 0x%p of asid %d is still alive\n",
1631 phys_pg_list, ctx->asid); 1631 phys_pg_list, ctx->asid);
1632 atomic64_sub(phys_pg_list->total_size,
1633 &hdev->dram_used_mem);
1632 free_phys_pg_pack(hdev, phys_pg_list); 1634 free_phys_pg_pack(hdev, phys_pg_list);
1633 idr_remove(&vm->phys_pg_pack_handles, i); 1635 idr_remove(&vm->phys_pg_pack_handles, i);
1634 } 1636 }
diff --git a/drivers/misc/lkdtm/bugs.c b/drivers/misc/lkdtm/bugs.c
index 1606658b9b7e..24245ccdba72 100644
--- a/drivers/misc/lkdtm/bugs.c
+++ b/drivers/misc/lkdtm/bugs.c
@@ -22,7 +22,7 @@ struct lkdtm_list {
22 * recurse past the end of THREAD_SIZE by default. 22 * recurse past the end of THREAD_SIZE by default.
23 */ 23 */
24#if defined(CONFIG_FRAME_WARN) && (CONFIG_FRAME_WARN > 0) 24#if defined(CONFIG_FRAME_WARN) && (CONFIG_FRAME_WARN > 0)
25#define REC_STACK_SIZE (CONFIG_FRAME_WARN / 2) 25#define REC_STACK_SIZE (_AC(CONFIG_FRAME_WARN, UL) / 2)
26#else 26#else
27#define REC_STACK_SIZE (THREAD_SIZE / 8) 27#define REC_STACK_SIZE (THREAD_SIZE / 8)
28#endif 28#endif
@@ -91,7 +91,7 @@ void lkdtm_LOOP(void)
91 91
92void lkdtm_EXHAUST_STACK(void) 92void lkdtm_EXHAUST_STACK(void)
93{ 93{
94 pr_info("Calling function with %d frame size to depth %d ...\n", 94 pr_info("Calling function with %lu frame size to depth %d ...\n",
95 REC_STACK_SIZE, recur_count); 95 REC_STACK_SIZE, recur_count);
96 recursive_loop(recur_count); 96 recursive_loop(recur_count);
97 pr_info("FAIL: survived without exhausting stack?!\n"); 97 pr_info("FAIL: survived without exhausting stack?!\n");
diff --git a/drivers/misc/mei/hw-me-regs.h b/drivers/misc/mei/hw-me-regs.h
index d74b182e19f3..77f7dff7098d 100644
--- a/drivers/misc/mei/hw-me-regs.h
+++ b/drivers/misc/mei/hw-me-regs.h
@@ -81,6 +81,11 @@
81 81
82#define MEI_DEV_ID_ICP_LP 0x34E0 /* Ice Lake Point LP */ 82#define MEI_DEV_ID_ICP_LP 0x34E0 /* Ice Lake Point LP */
83 83
84#define MEI_DEV_ID_TGP_LP 0xA0E0 /* Tiger Lake Point LP */
85
86#define MEI_DEV_ID_MCC 0x4B70 /* Mule Creek Canyon (EHL) */
87#define MEI_DEV_ID_MCC_4 0x4B75 /* Mule Creek Canyon 4 (EHL) */
88
84/* 89/*
85 * MEI HW Section 90 * MEI HW Section
86 */ 91 */
diff --git a/drivers/misc/mei/pci-me.c b/drivers/misc/mei/pci-me.c
index 7a2b3545a7f9..541538eff8b1 100644
--- a/drivers/misc/mei/pci-me.c
+++ b/drivers/misc/mei/pci-me.c
@@ -98,6 +98,11 @@ static const struct pci_device_id mei_me_pci_tbl[] = {
98 98
99 {MEI_PCI_DEVICE(MEI_DEV_ID_ICP_LP, MEI_ME_PCH12_CFG)}, 99 {MEI_PCI_DEVICE(MEI_DEV_ID_ICP_LP, MEI_ME_PCH12_CFG)},
100 100
101 {MEI_PCI_DEVICE(MEI_DEV_ID_TGP_LP, MEI_ME_PCH12_CFG)},
102
103 {MEI_PCI_DEVICE(MEI_DEV_ID_MCC, MEI_ME_PCH12_CFG)},
104 {MEI_PCI_DEVICE(MEI_DEV_ID_MCC_4, MEI_ME_PCH8_CFG)},
105
101 /* required last entry */ 106 /* required last entry */
102 {0, } 107 {0, }
103}; 108};
diff --git a/drivers/misc/vmw_balloon.c b/drivers/misc/vmw_balloon.c
index 8840299420e0..5e6be1527571 100644
--- a/drivers/misc/vmw_balloon.c
+++ b/drivers/misc/vmw_balloon.c
@@ -691,7 +691,6 @@ static int vmballoon_alloc_page_list(struct vmballoon *b,
691 } 691 }
692 692
693 if (page) { 693 if (page) {
694 vmballoon_mark_page_offline(page, ctl->page_size);
695 /* Success. Add the page to the list and continue. */ 694 /* Success. Add the page to the list and continue. */
696 list_add(&page->lru, &ctl->pages); 695 list_add(&page->lru, &ctl->pages);
697 continue; 696 continue;
@@ -930,7 +929,6 @@ static void vmballoon_release_page_list(struct list_head *page_list,
930 929
931 list_for_each_entry_safe(page, tmp, page_list, lru) { 930 list_for_each_entry_safe(page, tmp, page_list, lru) {
932 list_del(&page->lru); 931 list_del(&page->lru);
933 vmballoon_mark_page_online(page, page_size);
934 __free_pages(page, vmballoon_page_order(page_size)); 932 __free_pages(page, vmballoon_page_order(page_size));
935 } 933 }
936 934
@@ -1005,6 +1003,7 @@ static void vmballoon_enqueue_page_list(struct vmballoon *b,
1005 enum vmballoon_page_size_type page_size) 1003 enum vmballoon_page_size_type page_size)
1006{ 1004{
1007 unsigned long flags; 1005 unsigned long flags;
1006 struct page *page;
1008 1007
1009 if (page_size == VMW_BALLOON_4K_PAGE) { 1008 if (page_size == VMW_BALLOON_4K_PAGE) {
1010 balloon_page_list_enqueue(&b->b_dev_info, pages); 1009 balloon_page_list_enqueue(&b->b_dev_info, pages);
@@ -1014,6 +1013,11 @@ static void vmballoon_enqueue_page_list(struct vmballoon *b,
1014 * for the balloon compaction mechanism. 1013 * for the balloon compaction mechanism.
1015 */ 1014 */
1016 spin_lock_irqsave(&b->b_dev_info.pages_lock, flags); 1015 spin_lock_irqsave(&b->b_dev_info.pages_lock, flags);
1016
1017 list_for_each_entry(page, pages, lru) {
1018 vmballoon_mark_page_offline(page, VMW_BALLOON_2M_PAGE);
1019 }
1020
1017 list_splice_init(pages, &b->huge_pages); 1021 list_splice_init(pages, &b->huge_pages);
1018 __count_vm_events(BALLOON_INFLATE, *n_pages * 1022 __count_vm_events(BALLOON_INFLATE, *n_pages *
1019 vmballoon_page_in_frames(VMW_BALLOON_2M_PAGE)); 1023 vmballoon_page_in_frames(VMW_BALLOON_2M_PAGE));
@@ -1056,6 +1060,8 @@ static void vmballoon_dequeue_page_list(struct vmballoon *b,
1056 /* 2MB pages */ 1060 /* 2MB pages */
1057 spin_lock_irqsave(&b->b_dev_info.pages_lock, flags); 1061 spin_lock_irqsave(&b->b_dev_info.pages_lock, flags);
1058 list_for_each_entry_safe(page, tmp, &b->huge_pages, lru) { 1062 list_for_each_entry_safe(page, tmp, &b->huge_pages, lru) {
1063 vmballoon_mark_page_online(page, VMW_BALLOON_2M_PAGE);
1064
1059 list_move(&page->lru, pages); 1065 list_move(&page->lru, pages);
1060 if (++i == n_req_pages) 1066 if (++i == n_req_pages)
1061 break; 1067 break;
diff --git a/drivers/misc/vmw_vmci/vmci_doorbell.c b/drivers/misc/vmw_vmci/vmci_doorbell.c
index bad89b6e0802..345addd9306d 100644
--- a/drivers/misc/vmw_vmci/vmci_doorbell.c
+++ b/drivers/misc/vmw_vmci/vmci_doorbell.c
@@ -310,7 +310,8 @@ int vmci_dbell_host_context_notify(u32 src_cid, struct vmci_handle handle)
310 310
311 entry = container_of(resource, struct dbell_entry, resource); 311 entry = container_of(resource, struct dbell_entry, resource);
312 if (entry->run_delayed) { 312 if (entry->run_delayed) {
313 schedule_work(&entry->work); 313 if (!schedule_work(&entry->work))
314 vmci_resource_put(resource);
314 } else { 315 } else {
315 entry->notify_cb(entry->client_data); 316 entry->notify_cb(entry->client_data);
316 vmci_resource_put(resource); 317 vmci_resource_put(resource);
@@ -361,7 +362,8 @@ static void dbell_fire_entries(u32 notify_idx)
361 atomic_read(&dbell->active) == 1) { 362 atomic_read(&dbell->active) == 1) {
362 if (dbell->run_delayed) { 363 if (dbell->run_delayed) {
363 vmci_resource_get(&dbell->resource); 364 vmci_resource_get(&dbell->resource);
364 schedule_work(&dbell->work); 365 if (!schedule_work(&dbell->work))
366 vmci_resource_put(&dbell->resource);
365 } else { 367 } else {
366 dbell->notify_cb(dbell->client_data); 368 dbell->notify_cb(dbell->client_data);
367 } 369 }
diff --git a/drivers/mmc/core/queue.c b/drivers/mmc/core/queue.c
index e327f80ebe70..7102e2ebc614 100644
--- a/drivers/mmc/core/queue.c
+++ b/drivers/mmc/core/queue.c
@@ -10,6 +10,7 @@
10#include <linux/kthread.h> 10#include <linux/kthread.h>
11#include <linux/scatterlist.h> 11#include <linux/scatterlist.h>
12#include <linux/dma-mapping.h> 12#include <linux/dma-mapping.h>
13#include <linux/backing-dev.h>
13 14
14#include <linux/mmc/card.h> 15#include <linux/mmc/card.h>
15#include <linux/mmc/host.h> 16#include <linux/mmc/host.h>
@@ -427,6 +428,10 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card)
427 goto free_tag_set; 428 goto free_tag_set;
428 } 429 }
429 430
431 if (mmc_host_is_spi(host) && host->use_spi_crc)
432 mq->queue->backing_dev_info->capabilities |=
433 BDI_CAP_STABLE_WRITES;
434
430 mq->queue->queuedata = mq; 435 mq->queue->queuedata = mq;
431 blk_queue_rq_timeout(mq->queue, 60 * HZ); 436 blk_queue_rq_timeout(mq->queue, 60 * HZ);
432 437
diff --git a/drivers/mmc/core/sd.c b/drivers/mmc/core/sd.c
index d681e8aaca83..fe914ff5f5d6 100644
--- a/drivers/mmc/core/sd.c
+++ b/drivers/mmc/core/sd.c
@@ -1292,6 +1292,12 @@ int mmc_attach_sd(struct mmc_host *host)
1292 goto err; 1292 goto err;
1293 } 1293 }
1294 1294
1295 /*
1296 * Some SD cards claims an out of spec VDD voltage range. Let's treat
1297 * these bits as being in-valid and especially also bit7.
1298 */
1299 ocr &= ~0x7FFF;
1300
1295 rocr = mmc_select_voltage(host, ocr); 1301 rocr = mmc_select_voltage(host, ocr);
1296 1302
1297 /* 1303 /*
diff --git a/drivers/mmc/host/cavium.c b/drivers/mmc/host/cavium.c
index ed5cefb83768..89deb451e0ac 100644
--- a/drivers/mmc/host/cavium.c
+++ b/drivers/mmc/host/cavium.c
@@ -374,6 +374,7 @@ static int finish_dma_single(struct cvm_mmc_host *host, struct mmc_data *data)
374{ 374{
375 data->bytes_xfered = data->blocks * data->blksz; 375 data->bytes_xfered = data->blocks * data->blksz;
376 data->error = 0; 376 data->error = 0;
377 dma_unmap_sg(host->dev, data->sg, data->sg_len, get_dma_dir(data));
377 return 1; 378 return 1;
378} 379}
379 380
@@ -1046,7 +1047,8 @@ int cvm_mmc_of_slot_probe(struct device *dev, struct cvm_mmc_host *host)
1046 mmc->max_segs = 1; 1047 mmc->max_segs = 1;
1047 1048
1048 /* DMA size field can address up to 8 MB */ 1049 /* DMA size field can address up to 8 MB */
1049 mmc->max_seg_size = 8 * 1024 * 1024; 1050 mmc->max_seg_size = min_t(unsigned int, 8 * 1024 * 1024,
1051 dma_get_max_seg_size(host->dev));
1050 mmc->max_req_size = mmc->max_seg_size; 1052 mmc->max_req_size = mmc->max_seg_size;
1051 /* External DMA is in 512 byte blocks */ 1053 /* External DMA is in 512 byte blocks */
1052 mmc->max_blk_size = 512; 1054 mmc->max_blk_size = 512;
diff --git a/drivers/mmc/host/dw_mmc.c b/drivers/mmc/host/dw_mmc.c
index faaaf52a46d2..eea52e2c5a0c 100644
--- a/drivers/mmc/host/dw_mmc.c
+++ b/drivers/mmc/host/dw_mmc.c
@@ -2012,8 +2012,7 @@ static void dw_mci_tasklet_func(unsigned long priv)
2012 * delayed. Allowing the transfer to take place 2012 * delayed. Allowing the transfer to take place
2013 * avoids races and keeps things simple. 2013 * avoids races and keeps things simple.
2014 */ 2014 */
2015 if ((err != -ETIMEDOUT) && 2015 if (err != -ETIMEDOUT) {
2016 (cmd->opcode == MMC_SEND_TUNING_BLOCK)) {
2017 state = STATE_SENDING_DATA; 2016 state = STATE_SENDING_DATA;
2018 continue; 2017 continue;
2019 } 2018 }
diff --git a/drivers/mmc/host/meson-mx-sdio.c b/drivers/mmc/host/meson-mx-sdio.c
index 2d736e416775..ba9a63db73da 100644
--- a/drivers/mmc/host/meson-mx-sdio.c
+++ b/drivers/mmc/host/meson-mx-sdio.c
@@ -73,7 +73,7 @@
73 #define MESON_MX_SDIO_IRQC_IF_CONFIG_MASK GENMASK(7, 6) 73 #define MESON_MX_SDIO_IRQC_IF_CONFIG_MASK GENMASK(7, 6)
74 #define MESON_MX_SDIO_IRQC_FORCE_DATA_CLK BIT(8) 74 #define MESON_MX_SDIO_IRQC_FORCE_DATA_CLK BIT(8)
75 #define MESON_MX_SDIO_IRQC_FORCE_DATA_CMD BIT(9) 75 #define MESON_MX_SDIO_IRQC_FORCE_DATA_CMD BIT(9)
76 #define MESON_MX_SDIO_IRQC_FORCE_DATA_DAT_MASK GENMASK(10, 13) 76 #define MESON_MX_SDIO_IRQC_FORCE_DATA_DAT_MASK GENMASK(13, 10)
77 #define MESON_MX_SDIO_IRQC_SOFT_RESET BIT(15) 77 #define MESON_MX_SDIO_IRQC_SOFT_RESET BIT(15)
78 #define MESON_MX_SDIO_IRQC_FORCE_HALT BIT(30) 78 #define MESON_MX_SDIO_IRQC_FORCE_HALT BIT(30)
79 #define MESON_MX_SDIO_IRQC_HALT_HOLE BIT(31) 79 #define MESON_MX_SDIO_IRQC_HALT_HOLE BIT(31)
diff --git a/drivers/mmc/host/sdhci-acpi.c b/drivers/mmc/host/sdhci-acpi.c
index b3a130a9ee23..1604f512c7bd 100644
--- a/drivers/mmc/host/sdhci-acpi.c
+++ b/drivers/mmc/host/sdhci-acpi.c
@@ -883,7 +883,7 @@ static int sdhci_acpi_runtime_resume(struct device *dev)
883 883
884 sdhci_acpi_byt_setting(&c->pdev->dev); 884 sdhci_acpi_byt_setting(&c->pdev->dev);
885 885
886 return sdhci_runtime_resume_host(c->host); 886 return sdhci_runtime_resume_host(c->host, 0);
887} 887}
888 888
889#endif 889#endif
diff --git a/drivers/mmc/host/sdhci-cadence.c b/drivers/mmc/host/sdhci-cadence.c
index 163d1cf4367e..44139fceac24 100644
--- a/drivers/mmc/host/sdhci-cadence.c
+++ b/drivers/mmc/host/sdhci-cadence.c
@@ -369,6 +369,7 @@ static int sdhci_cdns_probe(struct platform_device *pdev)
369 host->mmc_host_ops.execute_tuning = sdhci_cdns_execute_tuning; 369 host->mmc_host_ops.execute_tuning = sdhci_cdns_execute_tuning;
370 host->mmc_host_ops.hs400_enhanced_strobe = 370 host->mmc_host_ops.hs400_enhanced_strobe =
371 sdhci_cdns_hs400_enhanced_strobe; 371 sdhci_cdns_hs400_enhanced_strobe;
372 sdhci_enable_v4_mode(host);
372 373
373 sdhci_get_of_property(pdev); 374 sdhci_get_of_property(pdev);
374 375
diff --git a/drivers/mmc/host/sdhci-esdhc-imx.c b/drivers/mmc/host/sdhci-esdhc-imx.c
index c391510e9ef4..776a94216248 100644
--- a/drivers/mmc/host/sdhci-esdhc-imx.c
+++ b/drivers/mmc/host/sdhci-esdhc-imx.c
@@ -1705,7 +1705,7 @@ static int sdhci_esdhc_runtime_resume(struct device *dev)
1705 esdhc_pltfm_set_clock(host, imx_data->actual_clock); 1705 esdhc_pltfm_set_clock(host, imx_data->actual_clock);
1706 } 1706 }
1707 1707
1708 err = sdhci_runtime_resume_host(host); 1708 err = sdhci_runtime_resume_host(host, 0);
1709 if (err) 1709 if (err)
1710 goto disable_ipg_clk; 1710 goto disable_ipg_clk;
1711 1711
diff --git a/drivers/mmc/host/sdhci-of-at91.c b/drivers/mmc/host/sdhci-of-at91.c
index e377b9bc55a4..e7d1920729fb 100644
--- a/drivers/mmc/host/sdhci-of-at91.c
+++ b/drivers/mmc/host/sdhci-of-at91.c
@@ -289,7 +289,7 @@ static int sdhci_at91_runtime_resume(struct device *dev)
289 } 289 }
290 290
291out: 291out:
292 return sdhci_runtime_resume_host(host); 292 return sdhci_runtime_resume_host(host, 0);
293} 293}
294#endif /* CONFIG_PM */ 294#endif /* CONFIG_PM */
295 295
@@ -357,6 +357,9 @@ static int sdhci_at91_probe(struct platform_device *pdev)
357 pm_runtime_set_autosuspend_delay(&pdev->dev, 50); 357 pm_runtime_set_autosuspend_delay(&pdev->dev, 50);
358 pm_runtime_use_autosuspend(&pdev->dev); 358 pm_runtime_use_autosuspend(&pdev->dev);
359 359
360 /* HS200 is broken at this moment */
361 host->quirks2 = SDHCI_QUIRK2_BROKEN_HS200;
362
360 ret = sdhci_add_host(host); 363 ret = sdhci_add_host(host);
361 if (ret) 364 if (ret)
362 goto pm_runtime_disable; 365 goto pm_runtime_disable;
diff --git a/drivers/mmc/host/sdhci-pci-core.c b/drivers/mmc/host/sdhci-pci-core.c
index 4041878eb0f3..7d06e2860c36 100644
--- a/drivers/mmc/host/sdhci-pci-core.c
+++ b/drivers/mmc/host/sdhci-pci-core.c
@@ -167,7 +167,7 @@ static int sdhci_pci_runtime_suspend_host(struct sdhci_pci_chip *chip)
167 167
168err_pci_runtime_suspend: 168err_pci_runtime_suspend:
169 while (--i >= 0) 169 while (--i >= 0)
170 sdhci_runtime_resume_host(chip->slots[i]->host); 170 sdhci_runtime_resume_host(chip->slots[i]->host, 0);
171 return ret; 171 return ret;
172} 172}
173 173
@@ -181,7 +181,7 @@ static int sdhci_pci_runtime_resume_host(struct sdhci_pci_chip *chip)
181 if (!slot) 181 if (!slot)
182 continue; 182 continue;
183 183
184 ret = sdhci_runtime_resume_host(slot->host); 184 ret = sdhci_runtime_resume_host(slot->host, 0);
185 if (ret) 185 if (ret)
186 return ret; 186 return ret;
187 } 187 }
diff --git a/drivers/mmc/host/sdhci-pxav3.c b/drivers/mmc/host/sdhci-pxav3.c
index 3ddecf479295..e55037ceda73 100644
--- a/drivers/mmc/host/sdhci-pxav3.c
+++ b/drivers/mmc/host/sdhci-pxav3.c
@@ -554,7 +554,7 @@ static int sdhci_pxav3_runtime_resume(struct device *dev)
554 if (!IS_ERR(pxa->clk_core)) 554 if (!IS_ERR(pxa->clk_core))
555 clk_prepare_enable(pxa->clk_core); 555 clk_prepare_enable(pxa->clk_core);
556 556
557 return sdhci_runtime_resume_host(host); 557 return sdhci_runtime_resume_host(host, 0);
558} 558}
559#endif 559#endif
560 560
diff --git a/drivers/mmc/host/sdhci-s3c.c b/drivers/mmc/host/sdhci-s3c.c
index 8e4a8ba33f05..f5753aef7151 100644
--- a/drivers/mmc/host/sdhci-s3c.c
+++ b/drivers/mmc/host/sdhci-s3c.c
@@ -745,7 +745,7 @@ static int sdhci_s3c_runtime_resume(struct device *dev)
745 clk_prepare_enable(busclk); 745 clk_prepare_enable(busclk);
746 if (ourhost->cur_clk >= 0) 746 if (ourhost->cur_clk >= 0)
747 clk_prepare_enable(ourhost->clk_bus[ourhost->cur_clk]); 747 clk_prepare_enable(ourhost->clk_bus[ourhost->cur_clk]);
748 ret = sdhci_runtime_resume_host(host); 748 ret = sdhci_runtime_resume_host(host, 0);
749 return ret; 749 return ret;
750} 750}
751#endif 751#endif
diff --git a/drivers/mmc/host/sdhci-sprd.c b/drivers/mmc/host/sdhci-sprd.c
index 6ee340a3fb3a..d07b9793380f 100644
--- a/drivers/mmc/host/sdhci-sprd.c
+++ b/drivers/mmc/host/sdhci-sprd.c
@@ -217,10 +217,11 @@ static inline void _sdhci_sprd_set_clock(struct sdhci_host *host,
217 struct sdhci_sprd_host *sprd_host = TO_SPRD_HOST(host); 217 struct sdhci_sprd_host *sprd_host = TO_SPRD_HOST(host);
218 u32 div, val, mask; 218 u32 div, val, mask;
219 219
220 div = sdhci_sprd_calc_div(sprd_host->base_rate, clk); 220 sdhci_writew(host, 0, SDHCI_CLOCK_CONTROL);
221 221
222 clk |= ((div & 0x300) >> 2) | ((div & 0xFF) << 8); 222 div = sdhci_sprd_calc_div(sprd_host->base_rate, clk);
223 sdhci_enable_clk(host, clk); 223 div = ((div & 0x300) >> 2) | ((div & 0xFF) << 8);
224 sdhci_enable_clk(host, div);
224 225
225 /* enable auto gate sdhc_enable_auto_gate */ 226 /* enable auto gate sdhc_enable_auto_gate */
226 val = sdhci_readl(host, SDHCI_SPRD_REG_32_BUSY_POSI); 227 val = sdhci_readl(host, SDHCI_SPRD_REG_32_BUSY_POSI);
@@ -373,6 +374,11 @@ static unsigned int sdhci_sprd_get_max_timeout_count(struct sdhci_host *host)
373 return 1 << 31; 374 return 1 << 31;
374} 375}
375 376
377static unsigned int sdhci_sprd_get_ro(struct sdhci_host *host)
378{
379 return 0;
380}
381
376static struct sdhci_ops sdhci_sprd_ops = { 382static struct sdhci_ops sdhci_sprd_ops = {
377 .read_l = sdhci_sprd_readl, 383 .read_l = sdhci_sprd_readl,
378 .write_l = sdhci_sprd_writel, 384 .write_l = sdhci_sprd_writel,
@@ -385,6 +391,7 @@ static struct sdhci_ops sdhci_sprd_ops = {
385 .set_uhs_signaling = sdhci_sprd_set_uhs_signaling, 391 .set_uhs_signaling = sdhci_sprd_set_uhs_signaling,
386 .hw_reset = sdhci_sprd_hw_reset, 392 .hw_reset = sdhci_sprd_hw_reset,
387 .get_max_timeout_count = sdhci_sprd_get_max_timeout_count, 393 .get_max_timeout_count = sdhci_sprd_get_max_timeout_count,
394 .get_ro = sdhci_sprd_get_ro,
388}; 395};
389 396
390static void sdhci_sprd_request(struct mmc_host *mmc, struct mmc_request *mrq) 397static void sdhci_sprd_request(struct mmc_host *mmc, struct mmc_request *mrq)
@@ -501,9 +508,12 @@ static void sdhci_sprd_phy_param_parse(struct sdhci_sprd_host *sprd_host,
501} 508}
502 509
503static const struct sdhci_pltfm_data sdhci_sprd_pdata = { 510static const struct sdhci_pltfm_data sdhci_sprd_pdata = {
504 .quirks = SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK, 511 .quirks = SDHCI_QUIRK_BROKEN_CARD_DETECTION |
512 SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK |
513 SDHCI_QUIRK_MISSING_CAPS,
505 .quirks2 = SDHCI_QUIRK2_BROKEN_HS200 | 514 .quirks2 = SDHCI_QUIRK2_BROKEN_HS200 |
506 SDHCI_QUIRK2_USE_32BIT_BLK_CNT, 515 SDHCI_QUIRK2_USE_32BIT_BLK_CNT |
516 SDHCI_QUIRK2_PRESET_VALUE_BROKEN,
507 .ops = &sdhci_sprd_ops, 517 .ops = &sdhci_sprd_ops,
508}; 518};
509 519
@@ -605,6 +615,16 @@ static int sdhci_sprd_probe(struct platform_device *pdev)
605 615
606 sdhci_enable_v4_mode(host); 616 sdhci_enable_v4_mode(host);
607 617
618 /*
619 * Supply the existing CAPS, but clear the UHS-I modes. This
620 * will allow these modes to be specified only by device
621 * tree properties through mmc_of_parse().
622 */
623 host->caps = sdhci_readl(host, SDHCI_CAPABILITIES);
624 host->caps1 = sdhci_readl(host, SDHCI_CAPABILITIES_1);
625 host->caps1 &= ~(SDHCI_SUPPORT_SDR50 | SDHCI_SUPPORT_SDR104 |
626 SDHCI_SUPPORT_DDR50);
627
608 ret = sdhci_setup_host(host); 628 ret = sdhci_setup_host(host);
609 if (ret) 629 if (ret)
610 goto pm_runtime_disable; 630 goto pm_runtime_disable;
@@ -624,6 +644,7 @@ err_cleanup_host:
624 sdhci_cleanup_host(host); 644 sdhci_cleanup_host(host);
625 645
626pm_runtime_disable: 646pm_runtime_disable:
647 pm_runtime_put_noidle(&pdev->dev);
627 pm_runtime_disable(&pdev->dev); 648 pm_runtime_disable(&pdev->dev);
628 pm_runtime_set_suspended(&pdev->dev); 649 pm_runtime_set_suspended(&pdev->dev);
629 650
@@ -695,7 +716,7 @@ static int sdhci_sprd_runtime_resume(struct device *dev)
695 if (ret) 716 if (ret)
696 goto clk_disable; 717 goto clk_disable;
697 718
698 sdhci_runtime_resume_host(host); 719 sdhci_runtime_resume_host(host, 1);
699 return 0; 720 return 0;
700 721
701clk_disable: 722clk_disable:
diff --git a/drivers/mmc/host/sdhci-tegra.c b/drivers/mmc/host/sdhci-tegra.c
index f4d4761cf20a..02d8f524bb9e 100644
--- a/drivers/mmc/host/sdhci-tegra.c
+++ b/drivers/mmc/host/sdhci-tegra.c
@@ -258,6 +258,16 @@ static void tegra210_sdhci_writew(struct sdhci_host *host, u16 val, int reg)
258 } 258 }
259} 259}
260 260
261static unsigned int tegra_sdhci_get_ro(struct sdhci_host *host)
262{
263 /*
264 * Write-enable shall be assumed if GPIO is missing in a board's
265 * device-tree because SDHCI's WRITE_PROTECT bit doesn't work on
266 * Tegra.
267 */
268 return mmc_gpio_get_ro(host->mmc);
269}
270
261static bool tegra_sdhci_is_pad_and_regulator_valid(struct sdhci_host *host) 271static bool tegra_sdhci_is_pad_and_regulator_valid(struct sdhci_host *host)
262{ 272{
263 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); 273 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
@@ -1224,6 +1234,7 @@ static const struct cqhci_host_ops sdhci_tegra_cqhci_ops = {
1224}; 1234};
1225 1235
1226static const struct sdhci_ops tegra_sdhci_ops = { 1236static const struct sdhci_ops tegra_sdhci_ops = {
1237 .get_ro = tegra_sdhci_get_ro,
1227 .read_w = tegra_sdhci_readw, 1238 .read_w = tegra_sdhci_readw,
1228 .write_l = tegra_sdhci_writel, 1239 .write_l = tegra_sdhci_writel,
1229 .set_clock = tegra_sdhci_set_clock, 1240 .set_clock = tegra_sdhci_set_clock,
@@ -1279,6 +1290,7 @@ static const struct sdhci_tegra_soc_data soc_data_tegra30 = {
1279}; 1290};
1280 1291
1281static const struct sdhci_ops tegra114_sdhci_ops = { 1292static const struct sdhci_ops tegra114_sdhci_ops = {
1293 .get_ro = tegra_sdhci_get_ro,
1282 .read_w = tegra_sdhci_readw, 1294 .read_w = tegra_sdhci_readw,
1283 .write_w = tegra_sdhci_writew, 1295 .write_w = tegra_sdhci_writew,
1284 .write_l = tegra_sdhci_writel, 1296 .write_l = tegra_sdhci_writel,
@@ -1332,6 +1344,7 @@ static const struct sdhci_tegra_soc_data soc_data_tegra124 = {
1332}; 1344};
1333 1345
1334static const struct sdhci_ops tegra210_sdhci_ops = { 1346static const struct sdhci_ops tegra210_sdhci_ops = {
1347 .get_ro = tegra_sdhci_get_ro,
1335 .read_w = tegra_sdhci_readw, 1348 .read_w = tegra_sdhci_readw,
1336 .write_w = tegra210_sdhci_writew, 1349 .write_w = tegra210_sdhci_writew,
1337 .write_l = tegra_sdhci_writel, 1350 .write_l = tegra_sdhci_writel,
@@ -1366,6 +1379,7 @@ static const struct sdhci_tegra_soc_data soc_data_tegra210 = {
1366}; 1379};
1367 1380
1368static const struct sdhci_ops tegra186_sdhci_ops = { 1381static const struct sdhci_ops tegra186_sdhci_ops = {
1382 .get_ro = tegra_sdhci_get_ro,
1369 .read_w = tegra_sdhci_readw, 1383 .read_w = tegra_sdhci_readw,
1370 .write_l = tegra_sdhci_writel, 1384 .write_l = tegra_sdhci_writel,
1371 .set_clock = tegra_sdhci_set_clock, 1385 .set_clock = tegra_sdhci_set_clock,
diff --git a/drivers/mmc/host/sdhci-xenon.c b/drivers/mmc/host/sdhci-xenon.c
index 8a18f14cf842..1dea1ba66f7b 100644
--- a/drivers/mmc/host/sdhci-xenon.c
+++ b/drivers/mmc/host/sdhci-xenon.c
@@ -638,7 +638,7 @@ static int xenon_runtime_resume(struct device *dev)
638 priv->restore_needed = false; 638 priv->restore_needed = false;
639 } 639 }
640 640
641 ret = sdhci_runtime_resume_host(host); 641 ret = sdhci_runtime_resume_host(host, 0);
642 if (ret) 642 if (ret)
643 goto out; 643 goto out;
644 return 0; 644 return 0;
diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
index 59acf8e3331e..a5dc5aae973e 100644
--- a/drivers/mmc/host/sdhci.c
+++ b/drivers/mmc/host/sdhci.c
@@ -3320,7 +3320,7 @@ int sdhci_runtime_suspend_host(struct sdhci_host *host)
3320} 3320}
3321EXPORT_SYMBOL_GPL(sdhci_runtime_suspend_host); 3321EXPORT_SYMBOL_GPL(sdhci_runtime_suspend_host);
3322 3322
3323int sdhci_runtime_resume_host(struct sdhci_host *host) 3323int sdhci_runtime_resume_host(struct sdhci_host *host, int soft_reset)
3324{ 3324{
3325 struct mmc_host *mmc = host->mmc; 3325 struct mmc_host *mmc = host->mmc;
3326 unsigned long flags; 3326 unsigned long flags;
@@ -3331,7 +3331,7 @@ int sdhci_runtime_resume_host(struct sdhci_host *host)
3331 host->ops->enable_dma(host); 3331 host->ops->enable_dma(host);
3332 } 3332 }
3333 3333
3334 sdhci_init(host, 0); 3334 sdhci_init(host, soft_reset);
3335 3335
3336 if (mmc->ios.power_mode != MMC_POWER_UNDEFINED && 3336 if (mmc->ios.power_mode != MMC_POWER_UNDEFINED &&
3337 mmc->ios.power_mode != MMC_POWER_OFF) { 3337 mmc->ios.power_mode != MMC_POWER_OFF) {
diff --git a/drivers/mmc/host/sdhci.h b/drivers/mmc/host/sdhci.h
index 89fd96596a1f..902f855efe8f 100644
--- a/drivers/mmc/host/sdhci.h
+++ b/drivers/mmc/host/sdhci.h
@@ -781,7 +781,7 @@ void sdhci_adma_write_desc(struct sdhci_host *host, void **desc,
781int sdhci_suspend_host(struct sdhci_host *host); 781int sdhci_suspend_host(struct sdhci_host *host);
782int sdhci_resume_host(struct sdhci_host *host); 782int sdhci_resume_host(struct sdhci_host *host);
783int sdhci_runtime_suspend_host(struct sdhci_host *host); 783int sdhci_runtime_suspend_host(struct sdhci_host *host);
784int sdhci_runtime_resume_host(struct sdhci_host *host); 784int sdhci_runtime_resume_host(struct sdhci_host *host, int soft_reset);
785#endif 785#endif
786 786
787void sdhci_cqe_enable(struct mmc_host *mmc); 787void sdhci_cqe_enable(struct mmc_host *mmc);
diff --git a/drivers/mtd/hyperbus/Kconfig b/drivers/mtd/hyperbus/Kconfig
index cff6bbd226f5..a4d8968d133d 100644
--- a/drivers/mtd/hyperbus/Kconfig
+++ b/drivers/mtd/hyperbus/Kconfig
@@ -1,5 +1,6 @@
1menuconfig MTD_HYPERBUS 1menuconfig MTD_HYPERBUS
2 tristate "HyperBus support" 2 tristate "HyperBus support"
3 depends on HAS_IOMEM
3 select MTD_CFI 4 select MTD_CFI
4 select MTD_MAP_BANK_WIDTH_2 5 select MTD_MAP_BANK_WIDTH_2
5 select MTD_CFI_AMDSTD 6 select MTD_CFI_AMDSTD
@@ -14,8 +15,9 @@ if MTD_HYPERBUS
14 15
15config HBMC_AM654 16config HBMC_AM654
16 tristate "HyperBus controller driver for AM65x SoC" 17 tristate "HyperBus controller driver for AM65x SoC"
18 depends on ARM64 || COMPILE_TEST
17 select MULTIPLEXER 19 select MULTIPLEXER
18 select MUX_MMIO 20 imply MUX_MMIO
19 help 21 help
20 This is the driver for HyperBus controller on TI's AM65x and 22 This is the driver for HyperBus controller on TI's AM65x and
21 other SoCs 23 other SoCs
diff --git a/drivers/mtd/maps/sa1100-flash.c b/drivers/mtd/maps/sa1100-flash.c
index 895510d40ce4..47602af4ee34 100644
--- a/drivers/mtd/maps/sa1100-flash.c
+++ b/drivers/mtd/maps/sa1100-flash.c
@@ -81,6 +81,7 @@ static int sa1100_probe_subdev(struct sa_subdev_info *subdev, struct resource *r
81 default: 81 default:
82 printk(KERN_WARNING "SA1100 flash: unknown base address " 82 printk(KERN_WARNING "SA1100 flash: unknown base address "
83 "0x%08lx, assuming CS0\n", phys); 83 "0x%08lx, assuming CS0\n", phys);
84 /* Fall through */
84 85
85 case SA1100_CS0_PHYS: 86 case SA1100_CS0_PHYS:
86 subdev->map.bankwidth = (MSC0 & MSC_RBW) ? 2 : 4; 87 subdev->map.bankwidth = (MSC0 & MSC_RBW) ? 2 : 4;
diff --git a/drivers/mtd/nand/onenand/onenand_base.c b/drivers/mtd/nand/onenand/onenand_base.c
index a1f8fe1abb10..e082d632fb74 100644
--- a/drivers/mtd/nand/onenand/onenand_base.c
+++ b/drivers/mtd/nand/onenand/onenand_base.c
@@ -3259,6 +3259,7 @@ static void onenand_check_features(struct mtd_info *mtd)
3259 switch (density) { 3259 switch (density) {
3260 case ONENAND_DEVICE_DENSITY_8Gb: 3260 case ONENAND_DEVICE_DENSITY_8Gb:
3261 this->options |= ONENAND_HAS_NOP_1; 3261 this->options |= ONENAND_HAS_NOP_1;
3262 /* fall through */
3262 case ONENAND_DEVICE_DENSITY_4Gb: 3263 case ONENAND_DEVICE_DENSITY_4Gb:
3263 if (ONENAND_IS_DDP(this)) 3264 if (ONENAND_IS_DDP(this))
3264 this->options |= ONENAND_HAS_2PLANE; 3265 this->options |= ONENAND_HAS_2PLANE;
diff --git a/drivers/mtd/nand/raw/nand_micron.c b/drivers/mtd/nand/raw/nand_micron.c
index 1622d3145587..8ca9fad6e6ad 100644
--- a/drivers/mtd/nand/raw/nand_micron.c
+++ b/drivers/mtd/nand/raw/nand_micron.c
@@ -390,6 +390,14 @@ static int micron_supports_on_die_ecc(struct nand_chip *chip)
390 (chip->id.data[4] & MICRON_ID_INTERNAL_ECC_MASK) != 0x2) 390 (chip->id.data[4] & MICRON_ID_INTERNAL_ECC_MASK) != 0x2)
391 return MICRON_ON_DIE_UNSUPPORTED; 391 return MICRON_ON_DIE_UNSUPPORTED;
392 392
393 /*
394 * It seems that there are devices which do not support ECC officially.
395 * At least the MT29F2G08ABAGA / MT29F2G08ABBGA devices supports
396 * enabling the ECC feature but don't reflect that to the READ_ID table.
397 * So we have to guarantee that we disable the ECC feature directly
398 * after we did the READ_ID table command. Later we can evaluate the
399 * ECC_ENABLE support.
400 */
393 ret = micron_nand_on_die_ecc_setup(chip, true); 401 ret = micron_nand_on_die_ecc_setup(chip, true);
394 if (ret) 402 if (ret)
395 return MICRON_ON_DIE_UNSUPPORTED; 403 return MICRON_ON_DIE_UNSUPPORTED;
@@ -398,13 +406,13 @@ static int micron_supports_on_die_ecc(struct nand_chip *chip)
398 if (ret) 406 if (ret)
399 return MICRON_ON_DIE_UNSUPPORTED; 407 return MICRON_ON_DIE_UNSUPPORTED;
400 408
401 if (!(id[4] & MICRON_ID_ECC_ENABLED))
402 return MICRON_ON_DIE_UNSUPPORTED;
403
404 ret = micron_nand_on_die_ecc_setup(chip, false); 409 ret = micron_nand_on_die_ecc_setup(chip, false);
405 if (ret) 410 if (ret)
406 return MICRON_ON_DIE_UNSUPPORTED; 411 return MICRON_ON_DIE_UNSUPPORTED;
407 412
413 if (!(id[4] & MICRON_ID_ECC_ENABLED))
414 return MICRON_ON_DIE_UNSUPPORTED;
415
408 ret = nand_readid_op(chip, 0, id, sizeof(id)); 416 ret = nand_readid_op(chip, 0, id, sizeof(id));
409 if (ret) 417 if (ret)
410 return MICRON_ON_DIE_UNSUPPORTED; 418 return MICRON_ON_DIE_UNSUPPORTED;
diff --git a/drivers/mtd/spi-nor/spi-nor.c b/drivers/mtd/spi-nor/spi-nor.c
index 03cc788511d5..654bdc41fc99 100644
--- a/drivers/mtd/spi-nor/spi-nor.c
+++ b/drivers/mtd/spi-nor/spi-nor.c
@@ -3780,8 +3780,6 @@ static int spi_nor_init_params(struct spi_nor *nor,
3780 default: 3780 default:
3781 /* Kept only for backward compatibility purpose. */ 3781 /* Kept only for backward compatibility purpose. */
3782 params->quad_enable = spansion_quad_enable; 3782 params->quad_enable = spansion_quad_enable;
3783 if (nor->clear_sr_bp)
3784 nor->clear_sr_bp = spi_nor_spansion_clear_sr_bp;
3785 break; 3783 break;
3786 } 3784 }
3787 3785
@@ -4035,6 +4033,9 @@ static int spi_nor_init(struct spi_nor *nor)
4035 int err; 4033 int err;
4036 4034
4037 if (nor->clear_sr_bp) { 4035 if (nor->clear_sr_bp) {
4036 if (nor->quad_enable == spansion_quad_enable)
4037 nor->clear_sr_bp = spi_nor_spansion_clear_sr_bp;
4038
4038 err = nor->clear_sr_bp(nor); 4039 err = nor->clear_sr_bp(nor);
4039 if (err) { 4040 if (err) {
4040 dev_err(nor->dev, 4041 dev_err(nor->dev,
diff --git a/drivers/net/arcnet/arc-rimi.c b/drivers/net/arcnet/arc-rimi.c
index 11c5bad95226..14a5fb378145 100644
--- a/drivers/net/arcnet/arc-rimi.c
+++ b/drivers/net/arcnet/arc-rimi.c
@@ -363,10 +363,13 @@ static int __init arcrimi_setup(char *s)
363 switch (ints[0]) { 363 switch (ints[0]) {
364 default: /* ERROR */ 364 default: /* ERROR */
365 pr_err("Too many arguments\n"); 365 pr_err("Too many arguments\n");
366 /* Fall through */
366 case 3: /* Node ID */ 367 case 3: /* Node ID */
367 node = ints[3]; 368 node = ints[3];
369 /* Fall through */
368 case 2: /* IRQ */ 370 case 2: /* IRQ */
369 irq = ints[2]; 371 irq = ints[2];
372 /* Fall through */
370 case 1: /* IO address */ 373 case 1: /* IO address */
371 io = ints[1]; 374 io = ints[1];
372 } 375 }
diff --git a/drivers/net/arcnet/com20020-isa.c b/drivers/net/arcnet/com20020-isa.c
index 28510e33924f..cd27fdc1059b 100644
--- a/drivers/net/arcnet/com20020-isa.c
+++ b/drivers/net/arcnet/com20020-isa.c
@@ -197,16 +197,22 @@ static int __init com20020isa_setup(char *s)
197 switch (ints[0]) { 197 switch (ints[0]) {
198 default: /* ERROR */ 198 default: /* ERROR */
199 pr_info("Too many arguments\n"); 199 pr_info("Too many arguments\n");
200 /* Fall through */
200 case 6: /* Timeout */ 201 case 6: /* Timeout */
201 timeout = ints[6]; 202 timeout = ints[6];
203 /* Fall through */
202 case 5: /* CKP value */ 204 case 5: /* CKP value */
203 clockp = ints[5]; 205 clockp = ints[5];
206 /* Fall through */
204 case 4: /* Backplane flag */ 207 case 4: /* Backplane flag */
205 backplane = ints[4]; 208 backplane = ints[4];
209 /* Fall through */
206 case 3: /* Node ID */ 210 case 3: /* Node ID */
207 node = ints[3]; 211 node = ints[3];
212 /* Fall through */
208 case 2: /* IRQ */ 213 case 2: /* IRQ */
209 irq = ints[2]; 214 irq = ints[2];
215 /* Fall through */
210 case 1: /* IO address */ 216 case 1: /* IO address */
211 io = ints[1]; 217 io = ints[1];
212 } 218 }
diff --git a/drivers/net/arcnet/com90io.c b/drivers/net/arcnet/com90io.c
index 2c546013a980..186bbf87bc84 100644
--- a/drivers/net/arcnet/com90io.c
+++ b/drivers/net/arcnet/com90io.c
@@ -363,8 +363,10 @@ static int __init com90io_setup(char *s)
363 switch (ints[0]) { 363 switch (ints[0]) {
364 default: /* ERROR */ 364 default: /* ERROR */
365 pr_err("Too many arguments\n"); 365 pr_err("Too many arguments\n");
366 /* Fall through */
366 case 2: /* IRQ */ 367 case 2: /* IRQ */
367 irq = ints[2]; 368 irq = ints[2];
369 /* Fall through */
368 case 1: /* IO address */ 370 case 1: /* IO address */
369 io = ints[1]; 371 io = ints[1];
370 } 372 }
diff --git a/drivers/net/arcnet/com90xx.c b/drivers/net/arcnet/com90xx.c
index ca4a57c30bf8..bd75d06ad7df 100644
--- a/drivers/net/arcnet/com90xx.c
+++ b/drivers/net/arcnet/com90xx.c
@@ -693,10 +693,13 @@ static int __init com90xx_setup(char *s)
693 switch (ints[0]) { 693 switch (ints[0]) {
694 default: /* ERROR */ 694 default: /* ERROR */
695 pr_err("Too many arguments\n"); 695 pr_err("Too many arguments\n");
696 /* Fall through */
696 case 3: /* Mem address */ 697 case 3: /* Mem address */
697 shmem = ints[3]; 698 shmem = ints[3];
699 /* Fall through */
698 case 2: /* IRQ */ 700 case 2: /* IRQ */
699 irq = ints[2]; 701 irq = ints[2];
702 /* Fall through */
700 case 1: /* IO address */ 703 case 1: /* IO address */
701 io = ints[1]; 704 io = ints[1];
702 } 705 }
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 9b7016abca2f..931d9d935686 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -1126,6 +1126,8 @@ static void bond_compute_features(struct bonding *bond)
1126done: 1126done:
1127 bond_dev->vlan_features = vlan_features; 1127 bond_dev->vlan_features = vlan_features;
1128 bond_dev->hw_enc_features = enc_features | NETIF_F_GSO_ENCAP_ALL | 1128 bond_dev->hw_enc_features = enc_features | NETIF_F_GSO_ENCAP_ALL |
1129 NETIF_F_HW_VLAN_CTAG_TX |
1130 NETIF_F_HW_VLAN_STAG_TX |
1129 NETIF_F_GSO_UDP_L4; 1131 NETIF_F_GSO_UDP_L4;
1130 bond_dev->mpls_features = mpls_features; 1132 bond_dev->mpls_features = mpls_features;
1131 bond_dev->gso_max_segs = gso_max_segs; 1133 bond_dev->gso_max_segs = gso_max_segs;
@@ -2196,6 +2198,15 @@ static void bond_miimon_commit(struct bonding *bond)
2196 bond_for_each_slave(bond, slave, iter) { 2198 bond_for_each_slave(bond, slave, iter) {
2197 switch (slave->new_link) { 2199 switch (slave->new_link) {
2198 case BOND_LINK_NOCHANGE: 2200 case BOND_LINK_NOCHANGE:
2201 /* For 802.3ad mode, check current slave speed and
2202 * duplex again in case its port was disabled after
2203 * invalid speed/duplex reporting but recovered before
2204 * link monitoring could make a decision on the actual
2205 * link status
2206 */
2207 if (BOND_MODE(bond) == BOND_MODE_8023AD &&
2208 slave->link == BOND_LINK_UP)
2209 bond_3ad_adapter_speed_duplex_changed(slave);
2199 continue; 2210 continue;
2200 2211
2201 case BOND_LINK_UP: 2212 case BOND_LINK_UP:
diff --git a/drivers/net/can/at91_can.c b/drivers/net/can/at91_can.c
index 1d4075903971..c8e1a04ba384 100644
--- a/drivers/net/can/at91_can.c
+++ b/drivers/net/can/at91_can.c
@@ -898,7 +898,8 @@ static void at91_irq_err_state(struct net_device *dev,
898 CAN_ERR_CRTL_TX_WARNING : 898 CAN_ERR_CRTL_TX_WARNING :
899 CAN_ERR_CRTL_RX_WARNING; 899 CAN_ERR_CRTL_RX_WARNING;
900 } 900 }
901 case CAN_STATE_ERROR_WARNING: /* fallthrough */ 901 /* fall through */
902 case CAN_STATE_ERROR_WARNING:
902 /* 903 /*
903 * from: ERROR_ACTIVE, ERROR_WARNING 904 * from: ERROR_ACTIVE, ERROR_WARNING
904 * to : ERROR_PASSIVE, BUS_OFF 905 * to : ERROR_PASSIVE, BUS_OFF
@@ -947,7 +948,8 @@ static void at91_irq_err_state(struct net_device *dev,
947 netdev_dbg(dev, "Error Active\n"); 948 netdev_dbg(dev, "Error Active\n");
948 cf->can_id |= CAN_ERR_PROT; 949 cf->can_id |= CAN_ERR_PROT;
949 cf->data[2] = CAN_ERR_PROT_ACTIVE; 950 cf->data[2] = CAN_ERR_PROT_ACTIVE;
950 case CAN_STATE_ERROR_WARNING: /* fallthrough */ 951 /* fall through */
952 case CAN_STATE_ERROR_WARNING:
951 reg_idr = AT91_IRQ_ERRA | AT91_IRQ_WARN | AT91_IRQ_BOFF; 953 reg_idr = AT91_IRQ_ERRA | AT91_IRQ_WARN | AT91_IRQ_BOFF;
952 reg_ier = AT91_IRQ_ERRP; 954 reg_ier = AT91_IRQ_ERRP;
953 break; 955 break;
diff --git a/drivers/net/can/dev.c b/drivers/net/can/dev.c
index b6b93a2d93a5..483d270664cc 100644
--- a/drivers/net/can/dev.c
+++ b/drivers/net/can/dev.c
@@ -1249,6 +1249,8 @@ int register_candev(struct net_device *dev)
1249 return -EINVAL; 1249 return -EINVAL;
1250 1250
1251 dev->rtnl_link_ops = &can_link_ops; 1251 dev->rtnl_link_ops = &can_link_ops;
1252 netif_carrier_off(dev);
1253
1252 return register_netdev(dev); 1254 return register_netdev(dev);
1253} 1255}
1254EXPORT_SYMBOL_GPL(register_candev); 1256EXPORT_SYMBOL_GPL(register_candev);
diff --git a/drivers/net/can/flexcan.c b/drivers/net/can/flexcan.c
index f2fe344593d5..fcec8bcb53d6 100644
--- a/drivers/net/can/flexcan.c
+++ b/drivers/net/can/flexcan.c
@@ -400,9 +400,10 @@ static void flexcan_enable_wakeup_irq(struct flexcan_priv *priv, bool enable)
400 priv->write(reg_mcr, &regs->mcr); 400 priv->write(reg_mcr, &regs->mcr);
401} 401}
402 402
403static inline void flexcan_enter_stop_mode(struct flexcan_priv *priv) 403static inline int flexcan_enter_stop_mode(struct flexcan_priv *priv)
404{ 404{
405 struct flexcan_regs __iomem *regs = priv->regs; 405 struct flexcan_regs __iomem *regs = priv->regs;
406 unsigned int ackval;
406 u32 reg_mcr; 407 u32 reg_mcr;
407 408
408 reg_mcr = priv->read(&regs->mcr); 409 reg_mcr = priv->read(&regs->mcr);
@@ -412,20 +413,37 @@ static inline void flexcan_enter_stop_mode(struct flexcan_priv *priv)
412 /* enable stop request */ 413 /* enable stop request */
413 regmap_update_bits(priv->stm.gpr, priv->stm.req_gpr, 414 regmap_update_bits(priv->stm.gpr, priv->stm.req_gpr,
414 1 << priv->stm.req_bit, 1 << priv->stm.req_bit); 415 1 << priv->stm.req_bit, 1 << priv->stm.req_bit);
416
417 /* get stop acknowledgment */
418 if (regmap_read_poll_timeout(priv->stm.gpr, priv->stm.ack_gpr,
419 ackval, ackval & (1 << priv->stm.ack_bit),
420 0, FLEXCAN_TIMEOUT_US))
421 return -ETIMEDOUT;
422
423 return 0;
415} 424}
416 425
417static inline void flexcan_exit_stop_mode(struct flexcan_priv *priv) 426static inline int flexcan_exit_stop_mode(struct flexcan_priv *priv)
418{ 427{
419 struct flexcan_regs __iomem *regs = priv->regs; 428 struct flexcan_regs __iomem *regs = priv->regs;
429 unsigned int ackval;
420 u32 reg_mcr; 430 u32 reg_mcr;
421 431
422 /* remove stop request */ 432 /* remove stop request */
423 regmap_update_bits(priv->stm.gpr, priv->stm.req_gpr, 433 regmap_update_bits(priv->stm.gpr, priv->stm.req_gpr,
424 1 << priv->stm.req_bit, 0); 434 1 << priv->stm.req_bit, 0);
425 435
436 /* get stop acknowledgment */
437 if (regmap_read_poll_timeout(priv->stm.gpr, priv->stm.ack_gpr,
438 ackval, !(ackval & (1 << priv->stm.ack_bit)),
439 0, FLEXCAN_TIMEOUT_US))
440 return -ETIMEDOUT;
441
426 reg_mcr = priv->read(&regs->mcr); 442 reg_mcr = priv->read(&regs->mcr);
427 reg_mcr &= ~FLEXCAN_MCR_SLF_WAK; 443 reg_mcr &= ~FLEXCAN_MCR_SLF_WAK;
428 priv->write(reg_mcr, &regs->mcr); 444 priv->write(reg_mcr, &regs->mcr);
445
446 return 0;
429} 447}
430 448
431static inline void flexcan_error_irq_enable(const struct flexcan_priv *priv) 449static inline void flexcan_error_irq_enable(const struct flexcan_priv *priv)
@@ -1437,10 +1455,10 @@ static int flexcan_setup_stop_mode(struct platform_device *pdev)
1437 1455
1438 priv = netdev_priv(dev); 1456 priv = netdev_priv(dev);
1439 priv->stm.gpr = syscon_node_to_regmap(gpr_np); 1457 priv->stm.gpr = syscon_node_to_regmap(gpr_np);
1440 of_node_put(gpr_np);
1441 if (IS_ERR(priv->stm.gpr)) { 1458 if (IS_ERR(priv->stm.gpr)) {
1442 dev_dbg(&pdev->dev, "could not find gpr regmap\n"); 1459 dev_dbg(&pdev->dev, "could not find gpr regmap\n");
1443 return PTR_ERR(priv->stm.gpr); 1460 ret = PTR_ERR(priv->stm.gpr);
1461 goto out_put_node;
1444 } 1462 }
1445 1463
1446 priv->stm.req_gpr = out_val[1]; 1464 priv->stm.req_gpr = out_val[1];
@@ -1455,7 +1473,9 @@ static int flexcan_setup_stop_mode(struct platform_device *pdev)
1455 1473
1456 device_set_wakeup_capable(&pdev->dev, true); 1474 device_set_wakeup_capable(&pdev->dev, true);
1457 1475
1458 return 0; 1476out_put_node:
1477 of_node_put(gpr_np);
1478 return ret;
1459} 1479}
1460 1480
1461static const struct of_device_id flexcan_of_match[] = { 1481static const struct of_device_id flexcan_of_match[] = {
@@ -1612,7 +1632,9 @@ static int __maybe_unused flexcan_suspend(struct device *device)
1612 */ 1632 */
1613 if (device_may_wakeup(device)) { 1633 if (device_may_wakeup(device)) {
1614 enable_irq_wake(dev->irq); 1634 enable_irq_wake(dev->irq);
1615 flexcan_enter_stop_mode(priv); 1635 err = flexcan_enter_stop_mode(priv);
1636 if (err)
1637 return err;
1616 } else { 1638 } else {
1617 err = flexcan_chip_disable(priv); 1639 err = flexcan_chip_disable(priv);
1618 if (err) 1640 if (err)
@@ -1662,10 +1684,13 @@ static int __maybe_unused flexcan_noirq_resume(struct device *device)
1662{ 1684{
1663 struct net_device *dev = dev_get_drvdata(device); 1685 struct net_device *dev = dev_get_drvdata(device);
1664 struct flexcan_priv *priv = netdev_priv(dev); 1686 struct flexcan_priv *priv = netdev_priv(dev);
1687 int err;
1665 1688
1666 if (netif_running(dev) && device_may_wakeup(device)) { 1689 if (netif_running(dev) && device_may_wakeup(device)) {
1667 flexcan_enable_wakeup_irq(priv, false); 1690 flexcan_enable_wakeup_irq(priv, false);
1668 flexcan_exit_stop_mode(priv); 1691 err = flexcan_exit_stop_mode(priv);
1692 if (err)
1693 return err;
1669 } 1694 }
1670 1695
1671 return 0; 1696 return 0;
diff --git a/drivers/net/can/peak_canfd/peak_pciefd_main.c b/drivers/net/can/peak_canfd/peak_pciefd_main.c
index 7f6a3b971da9..13b10cbf236a 100644
--- a/drivers/net/can/peak_canfd/peak_pciefd_main.c
+++ b/drivers/net/can/peak_canfd/peak_pciefd_main.c
@@ -660,7 +660,7 @@ static int pciefd_can_probe(struct pciefd_board *pciefd)
660 pciefd_can_writereg(priv, CANFD_CLK_SEL_80MHZ, 660 pciefd_can_writereg(priv, CANFD_CLK_SEL_80MHZ,
661 PCIEFD_REG_CAN_CLK_SEL); 661 PCIEFD_REG_CAN_CLK_SEL);
662 662
663 /* fallthough */ 663 /* fall through */
664 case CANFD_CLK_SEL_80MHZ: 664 case CANFD_CLK_SEL_80MHZ:
665 priv->ucan.can.clock.freq = 80 * 1000 * 1000; 665 priv->ucan.can.clock.freq = 80 * 1000 * 1000;
666 break; 666 break;
diff --git a/drivers/net/can/rcar/rcar_canfd.c b/drivers/net/can/rcar/rcar_canfd.c
index 05410008aa6b..de34a4b82d4a 100644
--- a/drivers/net/can/rcar/rcar_canfd.c
+++ b/drivers/net/can/rcar/rcar_canfd.c
@@ -1508,10 +1508,11 @@ static int rcar_canfd_rx_poll(struct napi_struct *napi, int quota)
1508 1508
1509 /* All packets processed */ 1509 /* All packets processed */
1510 if (num_pkts < quota) { 1510 if (num_pkts < quota) {
1511 napi_complete_done(napi, num_pkts); 1511 if (napi_complete_done(napi, num_pkts)) {
1512 /* Enable Rx FIFO interrupts */ 1512 /* Enable Rx FIFO interrupts */
1513 rcar_canfd_set_bit(priv->base, RCANFD_RFCC(ridx), 1513 rcar_canfd_set_bit(priv->base, RCANFD_RFCC(ridx),
1514 RCANFD_RFCC_RFIE); 1514 RCANFD_RFCC_RFIE);
1515 }
1515 } 1516 }
1516 return num_pkts; 1517 return num_pkts;
1517} 1518}
diff --git a/drivers/net/can/sja1000/peak_pcmcia.c b/drivers/net/can/sja1000/peak_pcmcia.c
index 185c7f7d38a4..5e0d5e8101c8 100644
--- a/drivers/net/can/sja1000/peak_pcmcia.c
+++ b/drivers/net/can/sja1000/peak_pcmcia.c
@@ -479,7 +479,7 @@ static void pcan_free_channels(struct pcan_pccard *card)
479 if (!netdev) 479 if (!netdev)
480 continue; 480 continue;
481 481
482 strncpy(name, netdev->name, IFNAMSIZ); 482 strlcpy(name, netdev->name, IFNAMSIZ);
483 483
484 unregister_sja1000dev(netdev); 484 unregister_sja1000dev(netdev);
485 485
diff --git a/drivers/net/can/spi/mcp251x.c b/drivers/net/can/spi/mcp251x.c
index 44e99e3d7134..12358f06d194 100644
--- a/drivers/net/can/spi/mcp251x.c
+++ b/drivers/net/can/spi/mcp251x.c
@@ -664,17 +664,6 @@ static int mcp251x_power_enable(struct regulator *reg, int enable)
664 return regulator_disable(reg); 664 return regulator_disable(reg);
665} 665}
666 666
667static void mcp251x_open_clean(struct net_device *net)
668{
669 struct mcp251x_priv *priv = netdev_priv(net);
670 struct spi_device *spi = priv->spi;
671
672 free_irq(spi->irq, priv);
673 mcp251x_hw_sleep(spi);
674 mcp251x_power_enable(priv->transceiver, 0);
675 close_candev(net);
676}
677
678static int mcp251x_stop(struct net_device *net) 667static int mcp251x_stop(struct net_device *net)
679{ 668{
680 struct mcp251x_priv *priv = netdev_priv(net); 669 struct mcp251x_priv *priv = netdev_priv(net);
@@ -860,7 +849,8 @@ static irqreturn_t mcp251x_can_ist(int irq, void *dev_id)
860 if (new_state >= CAN_STATE_ERROR_WARNING && 849 if (new_state >= CAN_STATE_ERROR_WARNING &&
861 new_state <= CAN_STATE_BUS_OFF) 850 new_state <= CAN_STATE_BUS_OFF)
862 priv->can.can_stats.error_warning++; 851 priv->can.can_stats.error_warning++;
863 case CAN_STATE_ERROR_WARNING: /* fallthrough */ 852 /* fall through */
853 case CAN_STATE_ERROR_WARNING:
864 if (new_state >= CAN_STATE_ERROR_PASSIVE && 854 if (new_state >= CAN_STATE_ERROR_PASSIVE &&
865 new_state <= CAN_STATE_BUS_OFF) 855 new_state <= CAN_STATE_BUS_OFF)
866 priv->can.can_stats.error_passive++; 856 priv->can.can_stats.error_passive++;
@@ -940,37 +930,43 @@ static int mcp251x_open(struct net_device *net)
940 flags | IRQF_ONESHOT, DEVICE_NAME, priv); 930 flags | IRQF_ONESHOT, DEVICE_NAME, priv);
941 if (ret) { 931 if (ret) {
942 dev_err(&spi->dev, "failed to acquire irq %d\n", spi->irq); 932 dev_err(&spi->dev, "failed to acquire irq %d\n", spi->irq);
943 mcp251x_power_enable(priv->transceiver, 0); 933 goto out_close;
944 close_candev(net);
945 goto open_unlock;
946 } 934 }
947 935
948 priv->wq = alloc_workqueue("mcp251x_wq", WQ_FREEZABLE | WQ_MEM_RECLAIM, 936 priv->wq = alloc_workqueue("mcp251x_wq", WQ_FREEZABLE | WQ_MEM_RECLAIM,
949 0); 937 0);
938 if (!priv->wq) {
939 ret = -ENOMEM;
940 goto out_clean;
941 }
950 INIT_WORK(&priv->tx_work, mcp251x_tx_work_handler); 942 INIT_WORK(&priv->tx_work, mcp251x_tx_work_handler);
951 INIT_WORK(&priv->restart_work, mcp251x_restart_work_handler); 943 INIT_WORK(&priv->restart_work, mcp251x_restart_work_handler);
952 944
953 ret = mcp251x_hw_reset(spi); 945 ret = mcp251x_hw_reset(spi);
954 if (ret) { 946 if (ret)
955 mcp251x_open_clean(net); 947 goto out_free_wq;
956 goto open_unlock;
957 }
958 ret = mcp251x_setup(net, spi); 948 ret = mcp251x_setup(net, spi);
959 if (ret) { 949 if (ret)
960 mcp251x_open_clean(net); 950 goto out_free_wq;
961 goto open_unlock;
962 }
963 ret = mcp251x_set_normal_mode(spi); 951 ret = mcp251x_set_normal_mode(spi);
964 if (ret) { 952 if (ret)
965 mcp251x_open_clean(net); 953 goto out_free_wq;
966 goto open_unlock;
967 }
968 954
969 can_led_event(net, CAN_LED_EVENT_OPEN); 955 can_led_event(net, CAN_LED_EVENT_OPEN);
970 956
971 netif_wake_queue(net); 957 netif_wake_queue(net);
958 mutex_unlock(&priv->mcp_lock);
972 959
973open_unlock: 960 return 0;
961
962out_free_wq:
963 destroy_workqueue(priv->wq);
964out_clean:
965 free_irq(spi->irq, priv);
966 mcp251x_hw_sleep(spi);
967out_close:
968 mcp251x_power_enable(priv->transceiver, 0);
969 close_candev(net);
974 mutex_unlock(&priv->mcp_lock); 970 mutex_unlock(&priv->mcp_lock);
975 return ret; 971 return ret;
976} 972}
diff --git a/drivers/net/can/usb/peak_usb/pcan_usb.c b/drivers/net/can/usb/peak_usb/pcan_usb.c
index 15ce5ad1d632..617da295b6c1 100644
--- a/drivers/net/can/usb/peak_usb/pcan_usb.c
+++ b/drivers/net/can/usb/peak_usb/pcan_usb.c
@@ -415,7 +415,7 @@ static int pcan_usb_decode_error(struct pcan_usb_msg_context *mc, u8 n,
415 new_state = CAN_STATE_ERROR_WARNING; 415 new_state = CAN_STATE_ERROR_WARNING;
416 break; 416 break;
417 } 417 }
418 /* else: fall through */ 418 /* fall through */
419 419
420 case CAN_STATE_ERROR_WARNING: 420 case CAN_STATE_ERROR_WARNING:
421 if (n & PCAN_USB_ERROR_BUS_HEAVY) { 421 if (n & PCAN_USB_ERROR_BUS_HEAVY) {
diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_core.c b/drivers/net/can/usb/peak_usb/pcan_usb_core.c
index 458154c9b482..65dce642b86b 100644
--- a/drivers/net/can/usb/peak_usb/pcan_usb_core.c
+++ b/drivers/net/can/usb/peak_usb/pcan_usb_core.c
@@ -568,16 +568,16 @@ static int peak_usb_ndo_stop(struct net_device *netdev)
568 dev->state &= ~PCAN_USB_STATE_STARTED; 568 dev->state &= ~PCAN_USB_STATE_STARTED;
569 netif_stop_queue(netdev); 569 netif_stop_queue(netdev);
570 570
571 close_candev(netdev);
572
573 dev->can.state = CAN_STATE_STOPPED;
574
571 /* unlink all pending urbs and free used memory */ 575 /* unlink all pending urbs and free used memory */
572 peak_usb_unlink_all_urbs(dev); 576 peak_usb_unlink_all_urbs(dev);
573 577
574 if (dev->adapter->dev_stop) 578 if (dev->adapter->dev_stop)
575 dev->adapter->dev_stop(dev); 579 dev->adapter->dev_stop(dev);
576 580
577 close_candev(netdev);
578
579 dev->can.state = CAN_STATE_STOPPED;
580
581 /* can set bus off now */ 581 /* can set bus off now */
582 if (dev->adapter->dev_set_bus) { 582 if (dev->adapter->dev_set_bus) {
583 int err = dev->adapter->dev_set_bus(dev, 0); 583 int err = dev->adapter->dev_set_bus(dev, 0);
@@ -855,7 +855,7 @@ static void peak_usb_disconnect(struct usb_interface *intf)
855 855
856 dev_prev_siblings = dev->prev_siblings; 856 dev_prev_siblings = dev->prev_siblings;
857 dev->state &= ~PCAN_USB_STATE_CONNECTED; 857 dev->state &= ~PCAN_USB_STATE_CONNECTED;
858 strncpy(name, netdev->name, IFNAMSIZ); 858 strlcpy(name, netdev->name, IFNAMSIZ);
859 859
860 unregister_netdev(netdev); 860 unregister_netdev(netdev);
861 861
diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_fd.c b/drivers/net/can/usb/peak_usb/pcan_usb_fd.c
index 34761c3a6286..47cc1ff5b88e 100644
--- a/drivers/net/can/usb/peak_usb/pcan_usb_fd.c
+++ b/drivers/net/can/usb/peak_usb/pcan_usb_fd.c
@@ -841,7 +841,7 @@ static int pcan_usb_fd_init(struct peak_usb_device *dev)
841 goto err_out; 841 goto err_out;
842 842
843 /* allocate command buffer once for all for the interface */ 843 /* allocate command buffer once for all for the interface */
844 pdev->cmd_buffer_addr = kmalloc(PCAN_UFD_CMD_BUFFER_SIZE, 844 pdev->cmd_buffer_addr = kzalloc(PCAN_UFD_CMD_BUFFER_SIZE,
845 GFP_KERNEL); 845 GFP_KERNEL);
846 if (!pdev->cmd_buffer_addr) 846 if (!pdev->cmd_buffer_addr)
847 goto err_out_1; 847 goto err_out_1;
diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_pro.c b/drivers/net/can/usb/peak_usb/pcan_usb_pro.c
index 178bb7cff0c1..53cb2f72bdd0 100644
--- a/drivers/net/can/usb/peak_usb/pcan_usb_pro.c
+++ b/drivers/net/can/usb/peak_usb/pcan_usb_pro.c
@@ -494,7 +494,7 @@ static int pcan_usb_pro_drv_loaded(struct peak_usb_device *dev, int loaded)
494 u8 *buffer; 494 u8 *buffer;
495 int err; 495 int err;
496 496
497 buffer = kmalloc(PCAN_USBPRO_FCT_DRVLD_REQ_LEN, GFP_KERNEL); 497 buffer = kzalloc(PCAN_USBPRO_FCT_DRVLD_REQ_LEN, GFP_KERNEL);
498 if (!buffer) 498 if (!buffer)
499 return -ENOMEM; 499 return -ENOMEM;
500 500
diff --git a/drivers/net/dsa/bcm_sf2.c b/drivers/net/dsa/bcm_sf2.c
index 3811fdbda13e..28c963a21dac 100644
--- a/drivers/net/dsa/bcm_sf2.c
+++ b/drivers/net/dsa/bcm_sf2.c
@@ -478,6 +478,7 @@ static void bcm_sf2_sw_validate(struct dsa_switch *ds, int port,
478 unsigned long *supported, 478 unsigned long *supported,
479 struct phylink_link_state *state) 479 struct phylink_link_state *state)
480{ 480{
481 struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
481 __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, }; 482 __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
482 483
483 if (!phy_interface_mode_is_rgmii(state->interface) && 484 if (!phy_interface_mode_is_rgmii(state->interface) &&
@@ -487,8 +488,10 @@ static void bcm_sf2_sw_validate(struct dsa_switch *ds, int port,
487 state->interface != PHY_INTERFACE_MODE_INTERNAL && 488 state->interface != PHY_INTERFACE_MODE_INTERNAL &&
488 state->interface != PHY_INTERFACE_MODE_MOCA) { 489 state->interface != PHY_INTERFACE_MODE_MOCA) {
489 bitmap_zero(supported, __ETHTOOL_LINK_MODE_MASK_NBITS); 490 bitmap_zero(supported, __ETHTOOL_LINK_MODE_MASK_NBITS);
490 dev_err(ds->dev, 491 if (port != core_readl(priv, CORE_IMP0_PRT_ID))
491 "Unsupported interface: %d\n", state->interface); 492 dev_err(ds->dev,
493 "Unsupported interface: %d for port %d\n",
494 state->interface, port);
492 return; 495 return;
493 } 496 }
494 497
@@ -526,6 +529,9 @@ static void bcm_sf2_sw_mac_config(struct dsa_switch *ds, int port,
526 u32 id_mode_dis = 0, port_mode; 529 u32 id_mode_dis = 0, port_mode;
527 u32 reg, offset; 530 u32 reg, offset;
528 531
532 if (port == core_readl(priv, CORE_IMP0_PRT_ID))
533 return;
534
529 if (priv->type == BCM7445_DEVICE_ID) 535 if (priv->type == BCM7445_DEVICE_ID)
530 offset = CORE_STS_OVERRIDE_GMIIP_PORT(port); 536 offset = CORE_STS_OVERRIDE_GMIIP_PORT(port);
531 else 537 else
diff --git a/drivers/net/dsa/microchip/ksz9477_spi.c b/drivers/net/dsa/microchip/ksz9477_spi.c
index 5a9e27b337a8..098b01e4ed1a 100644
--- a/drivers/net/dsa/microchip/ksz9477_spi.c
+++ b/drivers/net/dsa/microchip/ksz9477_spi.c
@@ -81,6 +81,7 @@ static const struct of_device_id ksz9477_dt_ids[] = {
81 { .compatible = "microchip,ksz9897" }, 81 { .compatible = "microchip,ksz9897" },
82 { .compatible = "microchip,ksz9893" }, 82 { .compatible = "microchip,ksz9893" },
83 { .compatible = "microchip,ksz9563" }, 83 { .compatible = "microchip,ksz9563" },
84 { .compatible = "microchip,ksz8563" },
84 {}, 85 {},
85}; 86};
86MODULE_DEVICE_TABLE(of, ksz9477_dt_ids); 87MODULE_DEVICE_TABLE(of, ksz9477_dt_ids);
diff --git a/drivers/net/dsa/microchip/ksz_common.h b/drivers/net/dsa/microchip/ksz_common.h
index ee7096d8af07..72ec250b9540 100644
--- a/drivers/net/dsa/microchip/ksz_common.h
+++ b/drivers/net/dsa/microchip/ksz_common.h
@@ -128,6 +128,7 @@ static inline void ksz_pwrite32(struct ksz_device *dev, int port, int offset,
128 128
129#define KSZ_REGMAP_ENTRY(width, swp, regbits, regpad, regalign) \ 129#define KSZ_REGMAP_ENTRY(width, swp, regbits, regpad, regalign) \
130 { \ 130 { \
131 .name = #width, \
131 .val_bits = (width), \ 132 .val_bits = (width), \
132 .reg_stride = (width) / 8, \ 133 .reg_stride = (width) / 8, \
133 .reg_bits = (regbits) + (regalign), \ 134 .reg_bits = (regbits) + (regalign), \
diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c
index 6b17cd961d06..d0a97eb73a37 100644
--- a/drivers/net/dsa/mv88e6xxx/chip.c
+++ b/drivers/net/dsa/mv88e6xxx/chip.c
@@ -27,7 +27,6 @@
27#include <linux/platform_data/mv88e6xxx.h> 27#include <linux/platform_data/mv88e6xxx.h>
28#include <linux/netdevice.h> 28#include <linux/netdevice.h>
29#include <linux/gpio/consumer.h> 29#include <linux/gpio/consumer.h>
30#include <linux/phy.h>
31#include <linux/phylink.h> 30#include <linux/phylink.h>
32#include <net/dsa.h> 31#include <net/dsa.h>
33 32
@@ -430,7 +429,7 @@ int mv88e6xxx_port_setup_mac(struct mv88e6xxx_chip *chip, int port, int link,
430 return 0; 429 return 0;
431 430
432 /* Port's MAC control must not be changed unless the link is down */ 431 /* Port's MAC control must not be changed unless the link is down */
433 err = chip->info->ops->port_set_link(chip, port, 0); 432 err = chip->info->ops->port_set_link(chip, port, LINK_FORCED_DOWN);
434 if (err) 433 if (err)
435 return err; 434 return err;
436 435
@@ -482,30 +481,6 @@ static int mv88e6xxx_phy_is_internal(struct dsa_switch *ds, int port)
482 return port < chip->info->num_internal_phys; 481 return port < chip->info->num_internal_phys;
483} 482}
484 483
485/* We expect the switch to perform auto negotiation if there is a real
486 * phy. However, in the case of a fixed link phy, we force the port
487 * settings from the fixed link settings.
488 */
489static void mv88e6xxx_adjust_link(struct dsa_switch *ds, int port,
490 struct phy_device *phydev)
491{
492 struct mv88e6xxx_chip *chip = ds->priv;
493 int err;
494
495 if (!phy_is_pseudo_fixed_link(phydev) &&
496 mv88e6xxx_phy_is_internal(ds, port))
497 return;
498
499 mv88e6xxx_reg_lock(chip);
500 err = mv88e6xxx_port_setup_mac(chip, port, phydev->link, phydev->speed,
501 phydev->duplex, phydev->pause,
502 phydev->interface);
503 mv88e6xxx_reg_unlock(chip);
504
505 if (err && err != -EOPNOTSUPP)
506 dev_err(ds->dev, "p%d: failed to configure MAC\n", port);
507}
508
509static void mv88e6065_phylink_validate(struct mv88e6xxx_chip *chip, int port, 484static void mv88e6065_phylink_validate(struct mv88e6xxx_chip *chip, int port,
510 unsigned long *mask, 485 unsigned long *mask,
511 struct phylink_link_state *state) 486 struct phylink_link_state *state)
@@ -2721,6 +2696,7 @@ static int mv88e6xxx_mdios_register(struct mv88e6xxx_chip *chip,
2721 err = mv88e6xxx_mdio_register(chip, child, true); 2696 err = mv88e6xxx_mdio_register(chip, child, true);
2722 if (err) { 2697 if (err) {
2723 mv88e6xxx_mdios_unregister(chip); 2698 mv88e6xxx_mdios_unregister(chip);
2699 of_node_put(child);
2724 return err; 2700 return err;
2725 } 2701 }
2726 } 2702 }
@@ -4638,7 +4614,6 @@ static int mv88e6xxx_port_egress_floods(struct dsa_switch *ds, int port,
4638static const struct dsa_switch_ops mv88e6xxx_switch_ops = { 4614static const struct dsa_switch_ops mv88e6xxx_switch_ops = {
4639 .get_tag_protocol = mv88e6xxx_get_tag_protocol, 4615 .get_tag_protocol = mv88e6xxx_get_tag_protocol,
4640 .setup = mv88e6xxx_setup, 4616 .setup = mv88e6xxx_setup,
4641 .adjust_link = mv88e6xxx_adjust_link,
4642 .phylink_validate = mv88e6xxx_validate, 4617 .phylink_validate = mv88e6xxx_validate,
4643 .phylink_mac_link_state = mv88e6xxx_link_state, 4618 .phylink_mac_link_state = mv88e6xxx_link_state,
4644 .phylink_mac_config = mv88e6xxx_mac_config, 4619 .phylink_mac_config = mv88e6xxx_mac_config,
diff --git a/drivers/net/dsa/qca8k.c b/drivers/net/dsa/qca8k.c
index 232e8cc96f6d..16f15c93a102 100644
--- a/drivers/net/dsa/qca8k.c
+++ b/drivers/net/dsa/qca8k.c
@@ -2,7 +2,7 @@
2/* 2/*
3 * Copyright (C) 2009 Felix Fietkau <nbd@nbd.name> 3 * Copyright (C) 2009 Felix Fietkau <nbd@nbd.name>
4 * Copyright (C) 2011-2012 Gabor Juhos <juhosg@openwrt.org> 4 * Copyright (C) 2011-2012 Gabor Juhos <juhosg@openwrt.org>
5 * Copyright (c) 2015, The Linux Foundation. All rights reserved. 5 * Copyright (c) 2015, 2019, The Linux Foundation. All rights reserved.
6 * Copyright (c) 2016 John Crispin <john@phrozen.org> 6 * Copyright (c) 2016 John Crispin <john@phrozen.org>
7 */ 7 */
8 8
@@ -583,8 +583,11 @@ qca8k_setup_mdio_bus(struct qca8k_priv *priv)
583 583
584 for_each_available_child_of_node(ports, port) { 584 for_each_available_child_of_node(ports, port) {
585 err = of_property_read_u32(port, "reg", &reg); 585 err = of_property_read_u32(port, "reg", &reg);
586 if (err) 586 if (err) {
587 of_node_put(port);
588 of_node_put(ports);
587 return err; 589 return err;
590 }
588 591
589 if (!dsa_is_user_port(priv->ds, reg)) 592 if (!dsa_is_user_port(priv->ds, reg))
590 continue; 593 continue;
@@ -595,6 +598,7 @@ qca8k_setup_mdio_bus(struct qca8k_priv *priv)
595 internal_mdio_mask |= BIT(reg); 598 internal_mdio_mask |= BIT(reg);
596 } 599 }
597 600
601 of_node_put(ports);
598 if (!external_mdio_mask && !internal_mdio_mask) { 602 if (!external_mdio_mask && !internal_mdio_mask) {
599 dev_err(priv->dev, "no PHYs are defined.\n"); 603 dev_err(priv->dev, "no PHYs are defined.\n");
600 return -EINVAL; 604 return -EINVAL;
@@ -935,6 +939,8 @@ qca8k_port_enable(struct dsa_switch *ds, int port,
935 qca8k_port_set_status(priv, port, 1); 939 qca8k_port_set_status(priv, port, 1);
936 priv->port_sts[port].enabled = 1; 940 priv->port_sts[port].enabled = 1;
937 941
942 phy_support_asym_pause(phy);
943
938 return 0; 944 return 0;
939} 945}
940 946
diff --git a/drivers/net/dsa/sja1105/sja1105_dynamic_config.c b/drivers/net/dsa/sja1105/sja1105_dynamic_config.c
index 6bfb1696a6f2..9988c9d18567 100644
--- a/drivers/net/dsa/sja1105/sja1105_dynamic_config.c
+++ b/drivers/net/dsa/sja1105/sja1105_dynamic_config.c
@@ -277,6 +277,18 @@ sja1105et_l2_lookup_cmd_packing(void *buf, struct sja1105_dyn_cmd *cmd,
277 SJA1105ET_SIZE_L2_LOOKUP_ENTRY, op); 277 SJA1105ET_SIZE_L2_LOOKUP_ENTRY, op);
278} 278}
279 279
280static size_t sja1105et_dyn_l2_lookup_entry_packing(void *buf, void *entry_ptr,
281 enum packing_op op)
282{
283 struct sja1105_l2_lookup_entry *entry = entry_ptr;
284 u8 *cmd = buf + SJA1105ET_SIZE_L2_LOOKUP_ENTRY;
285 const int size = SJA1105_SIZE_DYN_CMD;
286
287 sja1105_packing(cmd, &entry->lockeds, 28, 28, size, op);
288
289 return sja1105et_l2_lookup_entry_packing(buf, entry_ptr, op);
290}
291
280static void 292static void
281sja1105et_mgmt_route_cmd_packing(void *buf, struct sja1105_dyn_cmd *cmd, 293sja1105et_mgmt_route_cmd_packing(void *buf, struct sja1105_dyn_cmd *cmd,
282 enum packing_op op) 294 enum packing_op op)
@@ -477,7 +489,7 @@ sja1105et_general_params_entry_packing(void *buf, void *entry_ptr,
477/* SJA1105E/T: First generation */ 489/* SJA1105E/T: First generation */
478struct sja1105_dynamic_table_ops sja1105et_dyn_ops[BLK_IDX_MAX_DYN] = { 490struct sja1105_dynamic_table_ops sja1105et_dyn_ops[BLK_IDX_MAX_DYN] = {
479 [BLK_IDX_L2_LOOKUP] = { 491 [BLK_IDX_L2_LOOKUP] = {
480 .entry_packing = sja1105et_l2_lookup_entry_packing, 492 .entry_packing = sja1105et_dyn_l2_lookup_entry_packing,
481 .cmd_packing = sja1105et_l2_lookup_cmd_packing, 493 .cmd_packing = sja1105et_l2_lookup_cmd_packing,
482 .access = (OP_READ | OP_WRITE | OP_DEL), 494 .access = (OP_READ | OP_WRITE | OP_DEL),
483 .max_entry_count = SJA1105_MAX_L2_LOOKUP_COUNT, 495 .max_entry_count = SJA1105_MAX_L2_LOOKUP_COUNT,
diff --git a/drivers/net/dsa/sja1105/sja1105_main.c b/drivers/net/dsa/sja1105/sja1105_main.c
index 32bf3a7cc3b6..df976b259e43 100644
--- a/drivers/net/dsa/sja1105/sja1105_main.c
+++ b/drivers/net/dsa/sja1105/sja1105_main.c
@@ -218,7 +218,7 @@ static int sja1105_init_l2_lookup_params(struct sja1105_private *priv)
218 /* This selects between Independent VLAN Learning (IVL) and 218 /* This selects between Independent VLAN Learning (IVL) and
219 * Shared VLAN Learning (SVL) 219 * Shared VLAN Learning (SVL)
220 */ 220 */
221 .shared_learn = false, 221 .shared_learn = true,
222 /* Don't discard management traffic based on ENFPORT - 222 /* Don't discard management traffic based on ENFPORT -
223 * we don't perform SMAC port enforcement anyway, so 223 * we don't perform SMAC port enforcement anyway, so
224 * what we are setting here doesn't matter. 224 * what we are setting here doesn't matter.
@@ -625,6 +625,7 @@ static int sja1105_parse_ports_node(struct sja1105_private *priv,
625 if (of_property_read_u32(child, "reg", &index) < 0) { 625 if (of_property_read_u32(child, "reg", &index) < 0) {
626 dev_err(dev, "Port number not defined in device tree " 626 dev_err(dev, "Port number not defined in device tree "
627 "(property \"reg\")\n"); 627 "(property \"reg\")\n");
628 of_node_put(child);
628 return -ENODEV; 629 return -ENODEV;
629 } 630 }
630 631
@@ -634,6 +635,7 @@ static int sja1105_parse_ports_node(struct sja1105_private *priv,
634 dev_err(dev, "Failed to read phy-mode or " 635 dev_err(dev, "Failed to read phy-mode or "
635 "phy-interface-type property for port %d\n", 636 "phy-interface-type property for port %d\n",
636 index); 637 index);
638 of_node_put(child);
637 return -ENODEV; 639 return -ENODEV;
638 } 640 }
639 ports[index].phy_mode = phy_mode; 641 ports[index].phy_mode = phy_mode;
@@ -643,6 +645,7 @@ static int sja1105_parse_ports_node(struct sja1105_private *priv,
643 if (!of_phy_is_fixed_link(child)) { 645 if (!of_phy_is_fixed_link(child)) {
644 dev_err(dev, "phy-handle or fixed-link " 646 dev_err(dev, "phy-handle or fixed-link "
645 "properties missing!\n"); 647 "properties missing!\n");
648 of_node_put(child);
646 return -ENODEV; 649 return -ENODEV;
647 } 650 }
648 /* phy-handle is missing, but fixed-link isn't. 651 /* phy-handle is missing, but fixed-link isn't.
@@ -1089,8 +1092,13 @@ int sja1105pqrs_fdb_add(struct dsa_switch *ds, int port,
1089 l2_lookup.vlanid = vid; 1092 l2_lookup.vlanid = vid;
1090 l2_lookup.iotag = SJA1105_S_TAG; 1093 l2_lookup.iotag = SJA1105_S_TAG;
1091 l2_lookup.mask_macaddr = GENMASK_ULL(ETH_ALEN * 8 - 1, 0); 1094 l2_lookup.mask_macaddr = GENMASK_ULL(ETH_ALEN * 8 - 1, 0);
1092 l2_lookup.mask_vlanid = VLAN_VID_MASK; 1095 if (dsa_port_is_vlan_filtering(&ds->ports[port])) {
1093 l2_lookup.mask_iotag = BIT(0); 1096 l2_lookup.mask_vlanid = VLAN_VID_MASK;
1097 l2_lookup.mask_iotag = BIT(0);
1098 } else {
1099 l2_lookup.mask_vlanid = 0;
1100 l2_lookup.mask_iotag = 0;
1101 }
1094 l2_lookup.destports = BIT(port); 1102 l2_lookup.destports = BIT(port);
1095 1103
1096 rc = sja1105_dynamic_config_read(priv, BLK_IDX_L2_LOOKUP, 1104 rc = sja1105_dynamic_config_read(priv, BLK_IDX_L2_LOOKUP,
@@ -1147,8 +1155,13 @@ int sja1105pqrs_fdb_del(struct dsa_switch *ds, int port,
1147 l2_lookup.vlanid = vid; 1155 l2_lookup.vlanid = vid;
1148 l2_lookup.iotag = SJA1105_S_TAG; 1156 l2_lookup.iotag = SJA1105_S_TAG;
1149 l2_lookup.mask_macaddr = GENMASK_ULL(ETH_ALEN * 8 - 1, 0); 1157 l2_lookup.mask_macaddr = GENMASK_ULL(ETH_ALEN * 8 - 1, 0);
1150 l2_lookup.mask_vlanid = VLAN_VID_MASK; 1158 if (dsa_port_is_vlan_filtering(&ds->ports[port])) {
1151 l2_lookup.mask_iotag = BIT(0); 1159 l2_lookup.mask_vlanid = VLAN_VID_MASK;
1160 l2_lookup.mask_iotag = BIT(0);
1161 } else {
1162 l2_lookup.mask_vlanid = 0;
1163 l2_lookup.mask_iotag = 0;
1164 }
1152 l2_lookup.destports = BIT(port); 1165 l2_lookup.destports = BIT(port);
1153 1166
1154 rc = sja1105_dynamic_config_read(priv, BLK_IDX_L2_LOOKUP, 1167 rc = sja1105_dynamic_config_read(priv, BLK_IDX_L2_LOOKUP,
@@ -1178,60 +1191,31 @@ static int sja1105_fdb_add(struct dsa_switch *ds, int port,
1178 const unsigned char *addr, u16 vid) 1191 const unsigned char *addr, u16 vid)
1179{ 1192{
1180 struct sja1105_private *priv = ds->priv; 1193 struct sja1105_private *priv = ds->priv;
1181 u16 rx_vid, tx_vid;
1182 int rc, i;
1183 1194
1184 if (dsa_port_is_vlan_filtering(&ds->ports[port])) 1195 /* dsa_8021q is in effect when the bridge's vlan_filtering isn't,
1185 return priv->info->fdb_add_cmd(ds, port, addr, vid); 1196 * so the switch still does some VLAN processing internally.
1186 1197 * But Shared VLAN Learning (SVL) is also active, and it will take
1187 /* Since we make use of VLANs even when the bridge core doesn't tell us 1198 * care of autonomous forwarding between the unique pvid's of each
1188 * to, translate these FDB entries into the correct dsa_8021q ones. 1199 * port. Here we just make sure that users can't add duplicate FDB
1189 * The basic idea (also repeats for removal below) is: 1200 * entries when in this mode - the actual VID doesn't matter except
1190 * - Each of the other front-panel ports needs to be able to forward a 1201 * for what gets printed in 'bridge fdb show'. In the case of zero,
1191 * pvid-tagged (aka tagged with their rx_vid) frame that matches this 1202 * no VID gets printed at all.
1192 * DMAC.
1193 * - The CPU port (aka the tx_vid of this port) needs to be able to
1194 * send a frame matching this DMAC to the specified port.
1195 * For a better picture see net/dsa/tag_8021q.c.
1196 */ 1203 */
1197 for (i = 0; i < SJA1105_NUM_PORTS; i++) { 1204 if (!dsa_port_is_vlan_filtering(&ds->ports[port]))
1198 if (i == port) 1205 vid = 0;
1199 continue;
1200 if (i == dsa_upstream_port(priv->ds, port))
1201 continue;
1202 1206
1203 rx_vid = dsa_8021q_rx_vid(ds, i); 1207 return priv->info->fdb_add_cmd(ds, port, addr, vid);
1204 rc = priv->info->fdb_add_cmd(ds, port, addr, rx_vid);
1205 if (rc < 0)
1206 return rc;
1207 }
1208 tx_vid = dsa_8021q_tx_vid(ds, port);
1209 return priv->info->fdb_add_cmd(ds, port, addr, tx_vid);
1210} 1208}
1211 1209
1212static int sja1105_fdb_del(struct dsa_switch *ds, int port, 1210static int sja1105_fdb_del(struct dsa_switch *ds, int port,
1213 const unsigned char *addr, u16 vid) 1211 const unsigned char *addr, u16 vid)
1214{ 1212{
1215 struct sja1105_private *priv = ds->priv; 1213 struct sja1105_private *priv = ds->priv;
1216 u16 rx_vid, tx_vid;
1217 int rc, i;
1218 1214
1219 if (dsa_port_is_vlan_filtering(&ds->ports[port])) 1215 if (!dsa_port_is_vlan_filtering(&ds->ports[port]))
1220 return priv->info->fdb_del_cmd(ds, port, addr, vid); 1216 vid = 0;
1221 1217
1222 for (i = 0; i < SJA1105_NUM_PORTS; i++) { 1218 return priv->info->fdb_del_cmd(ds, port, addr, vid);
1223 if (i == port)
1224 continue;
1225 if (i == dsa_upstream_port(priv->ds, port))
1226 continue;
1227
1228 rx_vid = dsa_8021q_rx_vid(ds, i);
1229 rc = priv->info->fdb_del_cmd(ds, port, addr, rx_vid);
1230 if (rc < 0)
1231 return rc;
1232 }
1233 tx_vid = dsa_8021q_tx_vid(ds, port);
1234 return priv->info->fdb_del_cmd(ds, port, addr, tx_vid);
1235} 1219}
1236 1220
1237static int sja1105_fdb_dump(struct dsa_switch *ds, int port, 1221static int sja1105_fdb_dump(struct dsa_switch *ds, int port,
@@ -1239,12 +1223,8 @@ static int sja1105_fdb_dump(struct dsa_switch *ds, int port,
1239{ 1223{
1240 struct sja1105_private *priv = ds->priv; 1224 struct sja1105_private *priv = ds->priv;
1241 struct device *dev = ds->dev; 1225 struct device *dev = ds->dev;
1242 u16 rx_vid, tx_vid;
1243 int i; 1226 int i;
1244 1227
1245 rx_vid = dsa_8021q_rx_vid(ds, port);
1246 tx_vid = dsa_8021q_tx_vid(ds, port);
1247
1248 for (i = 0; i < SJA1105_MAX_L2_LOOKUP_COUNT; i++) { 1228 for (i = 0; i < SJA1105_MAX_L2_LOOKUP_COUNT; i++) {
1249 struct sja1105_l2_lookup_entry l2_lookup = {0}; 1229 struct sja1105_l2_lookup_entry l2_lookup = {0};
1250 u8 macaddr[ETH_ALEN]; 1230 u8 macaddr[ETH_ALEN];
@@ -1270,39 +1250,9 @@ static int sja1105_fdb_dump(struct dsa_switch *ds, int port,
1270 continue; 1250 continue;
1271 u64_to_ether_addr(l2_lookup.macaddr, macaddr); 1251 u64_to_ether_addr(l2_lookup.macaddr, macaddr);
1272 1252
1273 /* On SJA1105 E/T, the switch doesn't implement the LOCKEDS 1253 /* We need to hide the dsa_8021q VLANs from the user. */
1274 * bit, so it doesn't tell us whether a FDB entry is static 1254 if (!dsa_port_is_vlan_filtering(&ds->ports[port]))
1275 * or not. 1255 l2_lookup.vlanid = 0;
1276 * But, of course, we can find out - we're the ones who added
1277 * it in the first place.
1278 */
1279 if (priv->info->device_id == SJA1105E_DEVICE_ID ||
1280 priv->info->device_id == SJA1105T_DEVICE_ID) {
1281 int match;
1282
1283 match = sja1105_find_static_fdb_entry(priv, port,
1284 &l2_lookup);
1285 l2_lookup.lockeds = (match >= 0);
1286 }
1287
1288 /* We need to hide the dsa_8021q VLANs from the user. This
1289 * basically means hiding the duplicates and only showing
1290 * the pvid that is supposed to be active in standalone and
1291 * non-vlan_filtering modes (aka 1).
1292 * - For statically added FDB entries (bridge fdb add), we
1293 * can convert the TX VID (coming from the CPU port) into the
1294 * pvid and ignore the RX VIDs of the other ports.
1295 * - For dynamically learned FDB entries, a single entry with
1296 * no duplicates is learned - that which has the real port's
1297 * pvid, aka RX VID.
1298 */
1299 if (!dsa_port_is_vlan_filtering(&ds->ports[port])) {
1300 if (l2_lookup.vlanid == tx_vid ||
1301 l2_lookup.vlanid == rx_vid)
1302 l2_lookup.vlanid = 1;
1303 else
1304 continue;
1305 }
1306 cb(macaddr, l2_lookup.vlanid, l2_lookup.lockeds, data); 1256 cb(macaddr, l2_lookup.vlanid, l2_lookup.lockeds, data);
1307 } 1257 }
1308 return 0; 1258 return 0;
@@ -1594,6 +1544,7 @@ static int sja1105_vlan_prepare(struct dsa_switch *ds, int port,
1594 */ 1544 */
1595static int sja1105_vlan_filtering(struct dsa_switch *ds, int port, bool enabled) 1545static int sja1105_vlan_filtering(struct dsa_switch *ds, int port, bool enabled)
1596{ 1546{
1547 struct sja1105_l2_lookup_params_entry *l2_lookup_params;
1597 struct sja1105_general_params_entry *general_params; 1548 struct sja1105_general_params_entry *general_params;
1598 struct sja1105_private *priv = ds->priv; 1549 struct sja1105_private *priv = ds->priv;
1599 struct sja1105_table *table; 1550 struct sja1105_table *table;
@@ -1622,6 +1573,28 @@ static int sja1105_vlan_filtering(struct dsa_switch *ds, int port, bool enabled)
1622 general_params->incl_srcpt1 = enabled; 1573 general_params->incl_srcpt1 = enabled;
1623 general_params->incl_srcpt0 = enabled; 1574 general_params->incl_srcpt0 = enabled;
1624 1575
1576 /* VLAN filtering => independent VLAN learning.
1577 * No VLAN filtering => shared VLAN learning.
1578 *
1579 * In shared VLAN learning mode, untagged traffic still gets
1580 * pvid-tagged, and the FDB table gets populated with entries
1581 * containing the "real" (pvid or from VLAN tag) VLAN ID.
1582 * However the switch performs a masked L2 lookup in the FDB,
1583 * effectively only looking up a frame's DMAC (and not VID) for the
1584 * forwarding decision.
1585 *
1586 * This is extremely convenient for us, because in modes with
1587 * vlan_filtering=0, dsa_8021q actually installs unique pvid's into
1588 * each front panel port. This is good for identification but breaks
1589 * learning badly - the VID of the learnt FDB entry is unique, aka
1590 * no frames coming from any other port are going to have it. So
1591 * for forwarding purposes, this is as though learning was broken
1592 * (all frames get flooded).
1593 */
1594 table = &priv->static_config.tables[BLK_IDX_L2_LOOKUP_PARAMS];
1595 l2_lookup_params = table->entries;
1596 l2_lookup_params->shared_learn = !enabled;
1597
1625 rc = sja1105_static_config_reload(priv); 1598 rc = sja1105_static_config_reload(priv);
1626 if (rc) 1599 if (rc)
1627 dev_err(ds->dev, "Failed to change VLAN Ethertype\n"); 1600 dev_err(ds->dev, "Failed to change VLAN Ethertype\n");
@@ -1751,6 +1724,8 @@ static void sja1105_teardown(struct dsa_switch *ds)
1751 1724
1752 cancel_work_sync(&priv->tagger_data.rxtstamp_work); 1725 cancel_work_sync(&priv->tagger_data.rxtstamp_work);
1753 skb_queue_purge(&priv->tagger_data.skb_rxtstamp_queue); 1726 skb_queue_purge(&priv->tagger_data.skb_rxtstamp_queue);
1727 sja1105_ptp_clock_unregister(priv);
1728 sja1105_static_config_free(&priv->static_config);
1754} 1729}
1755 1730
1756static int sja1105_mgmt_xmit(struct dsa_switch *ds, int port, int slot, 1731static int sja1105_mgmt_xmit(struct dsa_switch *ds, int port, int slot,
@@ -2208,9 +2183,7 @@ static int sja1105_remove(struct spi_device *spi)
2208{ 2183{
2209 struct sja1105_private *priv = spi_get_drvdata(spi); 2184 struct sja1105_private *priv = spi_get_drvdata(spi);
2210 2185
2211 sja1105_ptp_clock_unregister(priv);
2212 dsa_unregister_switch(priv->ds); 2186 dsa_unregister_switch(priv->ds);
2213 sja1105_static_config_free(&priv->static_config);
2214 return 0; 2187 return 0;
2215} 2188}
2216 2189
diff --git a/drivers/net/dsa/sja1105/sja1105_ptp.c b/drivers/net/dsa/sja1105/sja1105_ptp.c
index d19cfdf681af..d8e8dd59f3d1 100644
--- a/drivers/net/dsa/sja1105/sja1105_ptp.c
+++ b/drivers/net/dsa/sja1105/sja1105_ptp.c
@@ -369,16 +369,15 @@ int sja1105_ptp_clock_register(struct sja1105_private *priv)
369 .mult = SJA1105_CC_MULT, 369 .mult = SJA1105_CC_MULT,
370 }; 370 };
371 mutex_init(&priv->ptp_lock); 371 mutex_init(&priv->ptp_lock);
372 INIT_DELAYED_WORK(&priv->refresh_work, sja1105_ptp_overflow_check);
373
374 schedule_delayed_work(&priv->refresh_work, SJA1105_REFRESH_INTERVAL);
375
376 priv->ptp_caps = sja1105_ptp_caps; 372 priv->ptp_caps = sja1105_ptp_caps;
377 373
378 priv->clock = ptp_clock_register(&priv->ptp_caps, ds->dev); 374 priv->clock = ptp_clock_register(&priv->ptp_caps, ds->dev);
379 if (IS_ERR_OR_NULL(priv->clock)) 375 if (IS_ERR_OR_NULL(priv->clock))
380 return PTR_ERR(priv->clock); 376 return PTR_ERR(priv->clock);
381 377
378 INIT_DELAYED_WORK(&priv->refresh_work, sja1105_ptp_overflow_check);
379 schedule_delayed_work(&priv->refresh_work, SJA1105_REFRESH_INTERVAL);
380
382 return sja1105_ptp_reset(priv); 381 return sja1105_ptp_reset(priv);
383} 382}
384 383
diff --git a/drivers/net/ethernet/8390/Kconfig b/drivers/net/ethernet/8390/Kconfig
index 2a3e2450968e..a9478577b495 100644
--- a/drivers/net/ethernet/8390/Kconfig
+++ b/drivers/net/ethernet/8390/Kconfig
@@ -12,8 +12,8 @@ config NET_VENDOR_8390
12 12
13 Note that the answer to this question doesn't directly affect the 13 Note that the answer to this question doesn't directly affect the
14 kernel: saying N will just cause the configurator to skip all 14 kernel: saying N will just cause the configurator to skip all
15 the questions about Western Digital cards. If you say Y, you will be 15 the questions about National Semiconductor 8390 cards. If you say Y,
16 asked for your specific card in the following questions. 16 you will be asked for your specific card in the following questions.
17 17
18if NET_VENDOR_8390 18if NET_VENDOR_8390
19 19
diff --git a/drivers/net/ethernet/agere/et131x.c b/drivers/net/ethernet/agere/et131x.c
index ea34bcb868b5..edbb4b3604c7 100644
--- a/drivers/net/ethernet/agere/et131x.c
+++ b/drivers/net/ethernet/agere/et131x.c
@@ -2362,7 +2362,7 @@ static int et131x_tx_dma_memory_alloc(struct et131x_adapter *adapter)
2362 2362
2363 /* Allocate memory for the TCB's (Transmit Control Block) */ 2363 /* Allocate memory for the TCB's (Transmit Control Block) */
2364 tx_ring->tcb_ring = kcalloc(NUM_TCB, sizeof(struct tcb), 2364 tx_ring->tcb_ring = kcalloc(NUM_TCB, sizeof(struct tcb),
2365 GFP_ATOMIC | GFP_DMA); 2365 GFP_KERNEL | GFP_DMA);
2366 if (!tx_ring->tcb_ring) 2366 if (!tx_ring->tcb_ring)
2367 return -ENOMEM; 2367 return -ENOMEM;
2368 2368
diff --git a/drivers/net/ethernet/allwinner/sun4i-emac.c b/drivers/net/ethernet/allwinner/sun4i-emac.c
index 3434730a7699..0537df06a9b5 100644
--- a/drivers/net/ethernet/allwinner/sun4i-emac.c
+++ b/drivers/net/ethernet/allwinner/sun4i-emac.c
@@ -860,7 +860,9 @@ static int emac_probe(struct platform_device *pdev)
860 goto out_clk_disable_unprepare; 860 goto out_clk_disable_unprepare;
861 } 861 }
862 862
863 db->phy_node = of_parse_phandle(np, "phy", 0); 863 db->phy_node = of_parse_phandle(np, "phy-handle", 0);
864 if (!db->phy_node)
865 db->phy_node = of_parse_phandle(np, "phy", 0);
864 if (!db->phy_node) { 866 if (!db->phy_node) {
865 dev_err(&pdev->dev, "no associated PHY\n"); 867 dev_err(&pdev->dev, "no associated PHY\n");
866 ret = -ENODEV; 868 ret = -ENODEV;
diff --git a/drivers/net/ethernet/amd/Kconfig b/drivers/net/ethernet/amd/Kconfig
index de4950d2022e..9f965cdfff5c 100644
--- a/drivers/net/ethernet/amd/Kconfig
+++ b/drivers/net/ethernet/amd/Kconfig
@@ -14,7 +14,7 @@ config NET_VENDOR_AMD
14 say Y. 14 say Y.
15 15
16 Note that the answer to this question does not directly affect 16 Note that the answer to this question does not directly affect
17 the kernel: saying N will just case the configurator to skip all 17 the kernel: saying N will just cause the configurator to skip all
18 the questions regarding AMD chipsets. If you say Y, you will be asked 18 the questions regarding AMD chipsets. If you say Y, you will be asked
19 for your specific chipset/driver in the following questions. 19 for your specific chipset/driver in the following questions.
20 20
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-main.c b/drivers/net/ethernet/amd/xgbe/xgbe-main.c
index b41f23679a08..7ce9c69e9c44 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-main.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-main.c
@@ -469,13 +469,19 @@ static int __init xgbe_mod_init(void)
469 469
470 ret = xgbe_platform_init(); 470 ret = xgbe_platform_init();
471 if (ret) 471 if (ret)
472 return ret; 472 goto err_platform_init;
473 473
474 ret = xgbe_pci_init(); 474 ret = xgbe_pci_init();
475 if (ret) 475 if (ret)
476 return ret; 476 goto err_pci_init;
477 477
478 return 0; 478 return 0;
479
480err_pci_init:
481 xgbe_platform_exit();
482err_platform_init:
483 unregister_netdevice_notifier(&xgbe_netdev_notifier);
484 return ret;
479} 485}
480 486
481static void __exit xgbe_mod_exit(void) 487static void __exit xgbe_mod_exit(void)
diff --git a/drivers/net/ethernet/apple/Kconfig b/drivers/net/ethernet/apple/Kconfig
index fde7ae33e302..f78b9c841296 100644
--- a/drivers/net/ethernet/apple/Kconfig
+++ b/drivers/net/ethernet/apple/Kconfig
@@ -11,8 +11,8 @@ config NET_VENDOR_APPLE
11 If you have a network (Ethernet) card belonging to this class, say Y. 11 If you have a network (Ethernet) card belonging to this class, say Y.
12 12
13 Note that the answer to this question doesn't directly affect the 13 Note that the answer to this question doesn't directly affect the
14 kernel: saying N will just cause the configurator to skip all 14 kernel: saying N will just cause the configurator to skip all the
15 the questions about IBM devices. If you say Y, you will be asked for 15 questions about Apple devices. If you say Y, you will be asked for
16 your specific card in the following questions. 16 your specific card in the following questions.
17 17
18if NET_VENDOR_APPLE 18if NET_VENDOR_APPLE
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_filters.c b/drivers/net/ethernet/aquantia/atlantic/aq_filters.c
index 440690b18734..aee827f07c16 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_filters.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_filters.c
@@ -431,7 +431,8 @@ int aq_del_fvlan_by_vlan(struct aq_nic_s *aq_nic, u16 vlan_id)
431 if (be16_to_cpu(rule->aq_fsp.h_ext.vlan_tci) == vlan_id) 431 if (be16_to_cpu(rule->aq_fsp.h_ext.vlan_tci) == vlan_id)
432 break; 432 break;
433 } 433 }
434 if (rule && be16_to_cpu(rule->aq_fsp.h_ext.vlan_tci) == vlan_id) { 434 if (rule && rule->type == aq_rx_filter_vlan &&
435 be16_to_cpu(rule->aq_fsp.h_ext.vlan_tci) == vlan_id) {
435 struct ethtool_rxnfc cmd; 436 struct ethtool_rxnfc cmd;
436 437
437 cmd.fs.location = rule->aq_fsp.location; 438 cmd.fs.location = rule->aq_fsp.location;
@@ -843,7 +844,7 @@ int aq_filters_vlans_update(struct aq_nic_s *aq_nic)
843 return err; 844 return err;
844 845
845 if (aq_nic->ndev->features & NETIF_F_HW_VLAN_CTAG_FILTER) { 846 if (aq_nic->ndev->features & NETIF_F_HW_VLAN_CTAG_FILTER) {
846 if (hweight < AQ_VLAN_MAX_FILTERS && hweight > 0) { 847 if (hweight <= AQ_VLAN_MAX_FILTERS && hweight > 0) {
847 err = aq_hw_ops->hw_filter_vlan_ctrl(aq_hw, 848 err = aq_hw_ops->hw_filter_vlan_ctrl(aq_hw,
848 !(aq_nic->packet_filter & IFF_PROMISC)); 849 !(aq_nic->packet_filter & IFF_PROMISC));
849 aq_nic->aq_nic_cfg.is_vlan_force_promisc = false; 850 aq_nic->aq_nic_cfg.is_vlan_force_promisc = false;
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_main.c b/drivers/net/ethernet/aquantia/atlantic/aq_main.c
index 100722ad5c2d..b4a0fb281e69 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_main.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_main.c
@@ -61,6 +61,10 @@ static int aq_ndev_open(struct net_device *ndev)
61 if (err < 0) 61 if (err < 0)
62 goto err_exit; 62 goto err_exit;
63 63
64 err = aq_filters_vlans_update(aq_nic);
65 if (err < 0)
66 goto err_exit;
67
64 err = aq_nic_start(aq_nic); 68 err = aq_nic_start(aq_nic);
65 if (err < 0) 69 if (err < 0)
66 goto err_exit; 70 goto err_exit;
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
index e1392766e21e..8f66e7817811 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
@@ -393,7 +393,7 @@ int aq_nic_start(struct aq_nic_s *self)
393 self->aq_nic_cfg.link_irq_vec); 393 self->aq_nic_cfg.link_irq_vec);
394 err = request_threaded_irq(irqvec, NULL, 394 err = request_threaded_irq(irqvec, NULL,
395 aq_linkstate_threaded_isr, 395 aq_linkstate_threaded_isr,
396 IRQF_SHARED, 396 IRQF_SHARED | IRQF_ONESHOT,
397 self->ndev->name, self); 397 self->ndev->name, self);
398 if (err < 0) 398 if (err < 0)
399 goto err_exit; 399 goto err_exit;
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_vec.c b/drivers/net/ethernet/aquantia/atlantic/aq_vec.c
index 715685aa48c3..28892b8acd0e 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_vec.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_vec.c
@@ -86,6 +86,7 @@ static int aq_vec_poll(struct napi_struct *napi, int budget)
86 } 86 }
87 } 87 }
88 88
89err_exit:
89 if (!was_tx_cleaned) 90 if (!was_tx_cleaned)
90 work_done = budget; 91 work_done = budget;
91 92
@@ -95,7 +96,7 @@ static int aq_vec_poll(struct napi_struct *napi, int budget)
95 1U << self->aq_ring_param.vec_idx); 96 1U << self->aq_ring_param.vec_idx);
96 } 97 }
97 } 98 }
98err_exit: 99
99 return work_done; 100 return work_done;
100} 101}
101 102
diff --git a/drivers/net/ethernet/atheros/ag71xx.c b/drivers/net/ethernet/atheros/ag71xx.c
index 8b69d0d7e726..6703960c7cf5 100644
--- a/drivers/net/ethernet/atheros/ag71xx.c
+++ b/drivers/net/ethernet/atheros/ag71xx.c
@@ -1141,7 +1141,7 @@ static int ag71xx_rings_init(struct ag71xx *ag)
1141 1141
1142 tx->descs_cpu = dma_alloc_coherent(&ag->pdev->dev, 1142 tx->descs_cpu = dma_alloc_coherent(&ag->pdev->dev,
1143 ring_size * AG71XX_DESC_SIZE, 1143 ring_size * AG71XX_DESC_SIZE,
1144 &tx->descs_dma, GFP_ATOMIC); 1144 &tx->descs_dma, GFP_KERNEL);
1145 if (!tx->descs_cpu) { 1145 if (!tx->descs_cpu) {
1146 kfree(tx->buf); 1146 kfree(tx->buf);
1147 tx->buf = NULL; 1147 tx->buf = NULL;
diff --git a/drivers/net/ethernet/broadcom/Kconfig b/drivers/net/ethernet/broadcom/Kconfig
index e9017caf024d..e24f5d2b6afe 100644
--- a/drivers/net/ethernet/broadcom/Kconfig
+++ b/drivers/net/ethernet/broadcom/Kconfig
@@ -14,9 +14,9 @@ config NET_VENDOR_BROADCOM
14 say Y. 14 say Y.
15 15
16 Note that the answer to this question does not directly affect 16 Note that the answer to this question does not directly affect
17 the kernel: saying N will just case the configurator to skip all 17 the kernel: saying N will just cause the configurator to skip all
18 the questions regarding AMD chipsets. If you say Y, you will be asked 18 the questions regarding Broadcom chipsets. If you say Y, you will
19 for your specific chipset/driver in the following questions. 19 be asked for your specific chipset/driver in the following questions.
20 20
21if NET_VENDOR_BROADCOM 21if NET_VENDOR_BROADCOM
22 22
diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c
index b9c5cea8db16..9483553ce444 100644
--- a/drivers/net/ethernet/broadcom/bcmsysport.c
+++ b/drivers/net/ethernet/broadcom/bcmsysport.c
@@ -992,7 +992,7 @@ static int bcm_sysport_poll(struct napi_struct *napi, int budget)
992{ 992{
993 struct bcm_sysport_priv *priv = 993 struct bcm_sysport_priv *priv =
994 container_of(napi, struct bcm_sysport_priv, napi); 994 container_of(napi, struct bcm_sysport_priv, napi);
995 struct dim_sample dim_sample; 995 struct dim_sample dim_sample = {};
996 unsigned int work_done = 0; 996 unsigned int work_done = 0;
997 997
998 work_done = bcm_sysport_desc_rx(priv, budget); 998 work_done = bcm_sysport_desc_rx(priv, budget);
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
index 656ed80647f0..d10b421ed1f1 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
@@ -285,6 +285,9 @@ int bnx2x_tx_int(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata)
285 hw_cons = le16_to_cpu(*txdata->tx_cons_sb); 285 hw_cons = le16_to_cpu(*txdata->tx_cons_sb);
286 sw_cons = txdata->tx_pkt_cons; 286 sw_cons = txdata->tx_pkt_cons;
287 287
288 /* Ensure subsequent loads occur after hw_cons */
289 smp_rmb();
290
288 while (sw_cons != hw_cons) { 291 while (sw_cons != hw_cons) {
289 u16 pkt_cons; 292 u16 pkt_cons;
290 293
@@ -1931,8 +1934,7 @@ u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb,
1931 } 1934 }
1932 1935
1933 /* select a non-FCoE queue */ 1936 /* select a non-FCoE queue */
1934 return netdev_pick_tx(dev, skb, NULL) % 1937 return netdev_pick_tx(dev, skb, NULL) % (BNX2X_NUM_ETH_QUEUES(bp));
1935 (BNX2X_NUM_ETH_QUEUES(bp) * bp->max_cos);
1936} 1938}
1937 1939
1938void bnx2x_set_num_queues(struct bnx2x *bp) 1940void bnx2x_set_num_queues(struct bnx2x *bp)
@@ -3055,12 +3057,13 @@ int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode, bool keep_link)
3055 /* if VF indicate to PF this function is going down (PF will delete sp 3057 /* if VF indicate to PF this function is going down (PF will delete sp
3056 * elements and clear initializations 3058 * elements and clear initializations
3057 */ 3059 */
3058 if (IS_VF(bp)) 3060 if (IS_VF(bp)) {
3061 bnx2x_clear_vlan_info(bp);
3059 bnx2x_vfpf_close_vf(bp); 3062 bnx2x_vfpf_close_vf(bp);
3060 else if (unload_mode != UNLOAD_RECOVERY) 3063 } else if (unload_mode != UNLOAD_RECOVERY) {
3061 /* if this is a normal/close unload need to clean up chip*/ 3064 /* if this is a normal/close unload need to clean up chip*/
3062 bnx2x_chip_cleanup(bp, unload_mode, keep_link); 3065 bnx2x_chip_cleanup(bp, unload_mode, keep_link);
3063 else { 3066 } else {
3064 /* Send the UNLOAD_REQUEST to the MCP */ 3067 /* Send the UNLOAD_REQUEST to the MCP */
3065 bnx2x_send_unload_req(bp, unload_mode); 3068 bnx2x_send_unload_req(bp, unload_mode);
3066 3069
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
index c2f6e44e9a3f..8b08cb18e363 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
@@ -425,6 +425,8 @@ void bnx2x_set_reset_global(struct bnx2x *bp);
425void bnx2x_disable_close_the_gate(struct bnx2x *bp); 425void bnx2x_disable_close_the_gate(struct bnx2x *bp);
426int bnx2x_init_hw_func_cnic(struct bnx2x *bp); 426int bnx2x_init_hw_func_cnic(struct bnx2x *bp);
427 427
428void bnx2x_clear_vlan_info(struct bnx2x *bp);
429
428/** 430/**
429 * bnx2x_sp_event - handle ramrods completion. 431 * bnx2x_sp_event - handle ramrods completion.
430 * 432 *
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index 2cc14db8f0ec..192ff8d5da32 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -8482,11 +8482,21 @@ int bnx2x_set_vlan_one(struct bnx2x *bp, u16 vlan,
8482 return rc; 8482 return rc;
8483} 8483}
8484 8484
8485void bnx2x_clear_vlan_info(struct bnx2x *bp)
8486{
8487 struct bnx2x_vlan_entry *vlan;
8488
8489 /* Mark that hw forgot all entries */
8490 list_for_each_entry(vlan, &bp->vlan_reg, link)
8491 vlan->hw = false;
8492
8493 bp->vlan_cnt = 0;
8494}
8495
8485static int bnx2x_del_all_vlans(struct bnx2x *bp) 8496static int bnx2x_del_all_vlans(struct bnx2x *bp)
8486{ 8497{
8487 struct bnx2x_vlan_mac_obj *vlan_obj = &bp->sp_objs[0].vlan_obj; 8498 struct bnx2x_vlan_mac_obj *vlan_obj = &bp->sp_objs[0].vlan_obj;
8488 unsigned long ramrod_flags = 0, vlan_flags = 0; 8499 unsigned long ramrod_flags = 0, vlan_flags = 0;
8489 struct bnx2x_vlan_entry *vlan;
8490 int rc; 8500 int rc;
8491 8501
8492 __set_bit(RAMROD_COMP_WAIT, &ramrod_flags); 8502 __set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
@@ -8495,10 +8505,7 @@ static int bnx2x_del_all_vlans(struct bnx2x *bp)
8495 if (rc) 8505 if (rc)
8496 return rc; 8506 return rc;
8497 8507
8498 /* Mark that hw forgot all entries */ 8508 bnx2x_clear_vlan_info(bp);
8499 list_for_each_entry(vlan, &bp->vlan_reg, link)
8500 vlan->hw = false;
8501 bp->vlan_cnt = 0;
8502 8509
8503 return 0; 8510 return 0;
8504} 8511}
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index 7134d2c3eb1c..8dce4069472b 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -2021,9 +2021,9 @@ static void __bnxt_poll_work_done(struct bnxt *bp, struct bnxt_napi *bnapi)
2021 if (bnapi->events & BNXT_RX_EVENT) { 2021 if (bnapi->events & BNXT_RX_EVENT) {
2022 struct bnxt_rx_ring_info *rxr = bnapi->rx_ring; 2022 struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
2023 2023
2024 bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
2025 if (bnapi->events & BNXT_AGG_EVENT) 2024 if (bnapi->events & BNXT_AGG_EVENT)
2026 bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod); 2025 bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod);
2026 bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
2027 } 2027 }
2028 bnapi->events = 0; 2028 bnapi->events = 0;
2029} 2029}
@@ -2136,7 +2136,7 @@ static int bnxt_poll(struct napi_struct *napi, int budget)
2136 } 2136 }
2137 } 2137 }
2138 if (bp->flags & BNXT_FLAG_DIM) { 2138 if (bp->flags & BNXT_FLAG_DIM) {
2139 struct dim_sample dim_sample; 2139 struct dim_sample dim_sample = {};
2140 2140
2141 dim_update_sample(cpr->event_ctr, 2141 dim_update_sample(cpr->event_ctr,
2142 cpr->rx_packets, 2142 cpr->rx_packets,
@@ -5064,6 +5064,7 @@ static void bnxt_set_db(struct bnxt *bp, struct bnxt_db_info *db, u32 ring_type,
5064 5064
5065static int bnxt_hwrm_ring_alloc(struct bnxt *bp) 5065static int bnxt_hwrm_ring_alloc(struct bnxt *bp)
5066{ 5066{
5067 bool agg_rings = !!(bp->flags & BNXT_FLAG_AGG_RINGS);
5067 int i, rc = 0; 5068 int i, rc = 0;
5068 u32 type; 5069 u32 type;
5069 5070
@@ -5139,7 +5140,9 @@ static int bnxt_hwrm_ring_alloc(struct bnxt *bp)
5139 if (rc) 5140 if (rc)
5140 goto err_out; 5141 goto err_out;
5141 bnxt_set_db(bp, &rxr->rx_db, type, map_idx, ring->fw_ring_id); 5142 bnxt_set_db(bp, &rxr->rx_db, type, map_idx, ring->fw_ring_id);
5142 bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod); 5143 /* If we have agg rings, post agg buffers first. */
5144 if (!agg_rings)
5145 bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
5143 bp->grp_info[map_idx].rx_fw_ring_id = ring->fw_ring_id; 5146 bp->grp_info[map_idx].rx_fw_ring_id = ring->fw_ring_id;
5144 if (bp->flags & BNXT_FLAG_CHIP_P5) { 5147 if (bp->flags & BNXT_FLAG_CHIP_P5) {
5145 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; 5148 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
@@ -5158,7 +5161,7 @@ static int bnxt_hwrm_ring_alloc(struct bnxt *bp)
5158 } 5161 }
5159 } 5162 }
5160 5163
5161 if (bp->flags & BNXT_FLAG_AGG_RINGS) { 5164 if (agg_rings) {
5162 type = HWRM_RING_ALLOC_AGG; 5165 type = HWRM_RING_ALLOC_AGG;
5163 for (i = 0; i < bp->rx_nr_rings; i++) { 5166 for (i = 0; i < bp->rx_nr_rings; i++) {
5164 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i]; 5167 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
@@ -5174,6 +5177,7 @@ static int bnxt_hwrm_ring_alloc(struct bnxt *bp)
5174 bnxt_set_db(bp, &rxr->rx_agg_db, type, map_idx, 5177 bnxt_set_db(bp, &rxr->rx_agg_db, type, map_idx,
5175 ring->fw_ring_id); 5178 ring->fw_ring_id);
5176 bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod); 5179 bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod);
5180 bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
5177 bp->grp_info[grp_idx].agg_fw_ring_id = ring->fw_ring_id; 5181 bp->grp_info[grp_idx].agg_fw_ring_id = ring->fw_ring_id;
5178 } 5182 }
5179 } 5183 }
@@ -7016,19 +7020,29 @@ static void bnxt_hwrm_clear_vnic_rss(struct bnxt *bp)
7016 bnxt_hwrm_vnic_set_rss(bp, i, false); 7020 bnxt_hwrm_vnic_set_rss(bp, i, false);
7017} 7021}
7018 7022
7019static void bnxt_hwrm_resource_free(struct bnxt *bp, bool close_path, 7023static void bnxt_clear_vnic(struct bnxt *bp)
7020 bool irq_re_init)
7021{ 7024{
7022 if (bp->vnic_info) { 7025 if (!bp->vnic_info)
7023 bnxt_hwrm_clear_vnic_filter(bp); 7026 return;
7027
7028 bnxt_hwrm_clear_vnic_filter(bp);
7029 if (!(bp->flags & BNXT_FLAG_CHIP_P5)) {
7024 /* clear all RSS setting before free vnic ctx */ 7030 /* clear all RSS setting before free vnic ctx */
7025 bnxt_hwrm_clear_vnic_rss(bp); 7031 bnxt_hwrm_clear_vnic_rss(bp);
7026 bnxt_hwrm_vnic_ctx_free(bp); 7032 bnxt_hwrm_vnic_ctx_free(bp);
7027 /* before free the vnic, undo the vnic tpa settings */
7028 if (bp->flags & BNXT_FLAG_TPA)
7029 bnxt_set_tpa(bp, false);
7030 bnxt_hwrm_vnic_free(bp);
7031 } 7033 }
7034 /* before free the vnic, undo the vnic tpa settings */
7035 if (bp->flags & BNXT_FLAG_TPA)
7036 bnxt_set_tpa(bp, false);
7037 bnxt_hwrm_vnic_free(bp);
7038 if (bp->flags & BNXT_FLAG_CHIP_P5)
7039 bnxt_hwrm_vnic_ctx_free(bp);
7040}
7041
7042static void bnxt_hwrm_resource_free(struct bnxt *bp, bool close_path,
7043 bool irq_re_init)
7044{
7045 bnxt_clear_vnic(bp);
7032 bnxt_hwrm_ring_free(bp, close_path); 7046 bnxt_hwrm_ring_free(bp, close_path);
7033 bnxt_hwrm_ring_grp_free(bp); 7047 bnxt_hwrm_ring_grp_free(bp);
7034 if (irq_re_init) { 7048 if (irq_re_init) {
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c
index 549c90d3e465..c05d663212b2 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c
@@ -98,10 +98,13 @@ static int bnxt_hwrm_nvm_req(struct bnxt *bp, u32 param_id, void *msg,
98 if (idx) 98 if (idx)
99 req->dimensions = cpu_to_le16(1); 99 req->dimensions = cpu_to_le16(1);
100 100
101 if (req->req_type == cpu_to_le16(HWRM_NVM_SET_VARIABLE)) 101 if (req->req_type == cpu_to_le16(HWRM_NVM_SET_VARIABLE)) {
102 memcpy(data_addr, buf, bytesize); 102 memcpy(data_addr, buf, bytesize);
103 103 rc = hwrm_send_message(bp, msg, msg_len, HWRM_CMD_TIMEOUT);
104 rc = hwrm_send_message(bp, msg, msg_len, HWRM_CMD_TIMEOUT); 104 } else {
105 rc = hwrm_send_message_silent(bp, msg, msg_len,
106 HWRM_CMD_TIMEOUT);
107 }
105 if (!rc && req->req_type == cpu_to_le16(HWRM_NVM_GET_VARIABLE)) 108 if (!rc && req->req_type == cpu_to_le16(HWRM_NVM_GET_VARIABLE))
106 memcpy(buf, data_addr, bytesize); 109 memcpy(buf, data_addr, bytesize);
107 110
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
index c7ee63d69679..8445a0cce849 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
@@ -2016,21 +2016,19 @@ static int bnxt_flash_package_from_file(struct net_device *dev,
2016 mutex_lock(&bp->hwrm_cmd_lock); 2016 mutex_lock(&bp->hwrm_cmd_lock);
2017 hwrm_err = _hwrm_send_message(bp, &install, sizeof(install), 2017 hwrm_err = _hwrm_send_message(bp, &install, sizeof(install),
2018 INSTALL_PACKAGE_TIMEOUT); 2018 INSTALL_PACKAGE_TIMEOUT);
2019 if (hwrm_err) 2019 if (hwrm_err) {
2020 goto flash_pkg_exit;
2021
2022 if (resp->error_code) {
2023 u8 error_code = ((struct hwrm_err_output *)resp)->cmd_err; 2020 u8 error_code = ((struct hwrm_err_output *)resp)->cmd_err;
2024 2021
2025 if (error_code == NVM_INSTALL_UPDATE_CMD_ERR_CODE_FRAG_ERR) { 2022 if (resp->error_code && error_code ==
2023 NVM_INSTALL_UPDATE_CMD_ERR_CODE_FRAG_ERR) {
2026 install.flags |= cpu_to_le16( 2024 install.flags |= cpu_to_le16(
2027 NVM_INSTALL_UPDATE_REQ_FLAGS_ALLOWED_TO_DEFRAG); 2025 NVM_INSTALL_UPDATE_REQ_FLAGS_ALLOWED_TO_DEFRAG);
2028 hwrm_err = _hwrm_send_message(bp, &install, 2026 hwrm_err = _hwrm_send_message(bp, &install,
2029 sizeof(install), 2027 sizeof(install),
2030 INSTALL_PACKAGE_TIMEOUT); 2028 INSTALL_PACKAGE_TIMEOUT);
2031 if (hwrm_err)
2032 goto flash_pkg_exit;
2033 } 2029 }
2030 if (hwrm_err)
2031 goto flash_pkg_exit;
2034 } 2032 }
2035 2033
2036 if (resp->result) { 2034 if (resp->result) {
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c
index 6fe4a7174271..dd621f6bd127 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c
@@ -1236,7 +1236,7 @@ static int __bnxt_tc_del_flow(struct bnxt *bp,
1236static void bnxt_tc_set_flow_dir(struct bnxt *bp, struct bnxt_tc_flow *flow, 1236static void bnxt_tc_set_flow_dir(struct bnxt *bp, struct bnxt_tc_flow *flow,
1237 u16 src_fid) 1237 u16 src_fid)
1238{ 1238{
1239 flow->dir = (bp->pf.fw_fid == src_fid) ? BNXT_DIR_RX : BNXT_DIR_TX; 1239 flow->l2_key.dir = (bp->pf.fw_fid == src_fid) ? BNXT_DIR_RX : BNXT_DIR_TX;
1240} 1240}
1241 1241
1242static void bnxt_tc_set_src_fid(struct bnxt *bp, struct bnxt_tc_flow *flow, 1242static void bnxt_tc_set_src_fid(struct bnxt *bp, struct bnxt_tc_flow *flow,
@@ -1285,9 +1285,7 @@ static int bnxt_tc_add_flow(struct bnxt *bp, u16 src_fid,
1285 goto free_node; 1285 goto free_node;
1286 1286
1287 bnxt_tc_set_src_fid(bp, flow, src_fid); 1287 bnxt_tc_set_src_fid(bp, flow, src_fid);
1288 1288 bnxt_tc_set_flow_dir(bp, flow, flow->src_fid);
1289 if (bp->fw_cap & BNXT_FW_CAP_OVS_64BIT_HANDLE)
1290 bnxt_tc_set_flow_dir(bp, flow, src_fid);
1291 1289
1292 if (!bnxt_tc_can_offload(bp, flow)) { 1290 if (!bnxt_tc_can_offload(bp, flow)) {
1293 rc = -EOPNOTSUPP; 1291 rc = -EOPNOTSUPP;
@@ -1407,7 +1405,7 @@ static void bnxt_fill_cfa_stats_req(struct bnxt *bp,
1407 * 2. 15th bit of flow_handle must specify the flow 1405 * 2. 15th bit of flow_handle must specify the flow
1408 * direction (TX/RX). 1406 * direction (TX/RX).
1409 */ 1407 */
1410 if (flow_node->flow.dir == BNXT_DIR_RX) 1408 if (flow_node->flow.l2_key.dir == BNXT_DIR_RX)
1411 handle = CFA_FLOW_INFO_REQ_FLOW_HANDLE_DIR_RX | 1409 handle = CFA_FLOW_INFO_REQ_FLOW_HANDLE_DIR_RX |
1412 CFA_FLOW_INFO_REQ_FLOW_HANDLE_MAX_MASK; 1410 CFA_FLOW_INFO_REQ_FLOW_HANDLE_MAX_MASK;
1413 else 1411 else
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.h
index ffec57d1a5ec..4f05305052f2 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.h
@@ -23,6 +23,9 @@ struct bnxt_tc_l2_key {
23 __be16 inner_vlan_tci; 23 __be16 inner_vlan_tci;
24 __be16 ether_type; 24 __be16 ether_type;
25 u8 num_vlans; 25 u8 num_vlans;
26 u8 dir;
27#define BNXT_DIR_RX 1
28#define BNXT_DIR_TX 0
26}; 29};
27 30
28struct bnxt_tc_l3_key { 31struct bnxt_tc_l3_key {
@@ -98,9 +101,6 @@ struct bnxt_tc_flow {
98 101
99 /* flow applicable to pkts ingressing on this fid */ 102 /* flow applicable to pkts ingressing on this fid */
100 u16 src_fid; 103 u16 src_fid;
101 u8 dir;
102#define BNXT_DIR_RX 1
103#define BNXT_DIR_TX 0
104 struct bnxt_tc_l2_key l2_key; 104 struct bnxt_tc_l2_key l2_key;
105 struct bnxt_tc_l2_key l2_mask; 105 struct bnxt_tc_l2_key l2_mask;
106 struct bnxt_tc_l3_key l3_key; 106 struct bnxt_tc_l3_key l3_key;
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
index a2b57807453b..b22196880d6d 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
@@ -1124,6 +1124,7 @@ static const struct ethtool_ops bcmgenet_ethtool_ops = {
1124 .set_coalesce = bcmgenet_set_coalesce, 1124 .set_coalesce = bcmgenet_set_coalesce,
1125 .get_link_ksettings = bcmgenet_get_link_ksettings, 1125 .get_link_ksettings = bcmgenet_get_link_ksettings,
1126 .set_link_ksettings = bcmgenet_set_link_ksettings, 1126 .set_link_ksettings = bcmgenet_set_link_ksettings,
1127 .get_ts_info = ethtool_op_get_ts_info,
1127}; 1128};
1128 1129
1129/* Power down the unimac, based on mode. */ 1130/* Power down the unimac, based on mode. */
@@ -1895,7 +1896,7 @@ static int bcmgenet_rx_poll(struct napi_struct *napi, int budget)
1895{ 1896{
1896 struct bcmgenet_rx_ring *ring = container_of(napi, 1897 struct bcmgenet_rx_ring *ring = container_of(napi,
1897 struct bcmgenet_rx_ring, napi); 1898 struct bcmgenet_rx_ring, napi);
1898 struct dim_sample dim_sample; 1899 struct dim_sample dim_sample = {};
1899 unsigned int work_done; 1900 unsigned int work_done;
1900 1901
1901 work_done = bcmgenet_desc_rx(ring, budget); 1902 work_done = bcmgenet_desc_rx(ring, budget);
diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c
index 5ca17e62dc3e..35b59b5edf0f 100644
--- a/drivers/net/ethernet/cadence/macb_main.c
+++ b/drivers/net/ethernet/cadence/macb_main.c
@@ -4154,7 +4154,7 @@ static const struct of_device_id macb_dt_ids[] = {
4154 { .compatible = "cdns,emac", .data = &emac_config }, 4154 { .compatible = "cdns,emac", .data = &emac_config },
4155 { .compatible = "cdns,zynqmp-gem", .data = &zynqmp_config}, 4155 { .compatible = "cdns,zynqmp-gem", .data = &zynqmp_config},
4156 { .compatible = "cdns,zynq-gem", .data = &zynq_config }, 4156 { .compatible = "cdns,zynq-gem", .data = &zynq_config },
4157 { .compatible = "sifive,fu540-macb", .data = &fu540_c000_config }, 4157 { .compatible = "sifive,fu540-c000-gem", .data = &fu540_c000_config },
4158 { /* sentinel */ } 4158 { /* sentinel */ }
4159}; 4159};
4160MODULE_DEVICE_TABLE(of, macb_dt_ids); 4160MODULE_DEVICE_TABLE(of, macb_dt_ids);
diff --git a/drivers/net/ethernet/cavium/common/cavium_ptp.c b/drivers/net/ethernet/cavium/common/cavium_ptp.c
index 73632b843749..b821c9e1604c 100644
--- a/drivers/net/ethernet/cavium/common/cavium_ptp.c
+++ b/drivers/net/ethernet/cavium/common/cavium_ptp.c
@@ -10,7 +10,7 @@
10 10
11#include "cavium_ptp.h" 11#include "cavium_ptp.h"
12 12
13#define DRV_NAME "Cavium PTP Driver" 13#define DRV_NAME "cavium_ptp"
14 14
15#define PCI_DEVICE_ID_CAVIUM_PTP 0xA00C 15#define PCI_DEVICE_ID_CAVIUM_PTP 0xA00C
16#define PCI_DEVICE_ID_CAVIUM_RST 0xA00E 16#define PCI_DEVICE_ID_CAVIUM_RST 0xA00E
diff --git a/drivers/net/ethernet/cavium/liquidio/request_manager.c b/drivers/net/ethernet/cavium/liquidio/request_manager.c
index 032224178b64..6dd65f9b347c 100644
--- a/drivers/net/ethernet/cavium/liquidio/request_manager.c
+++ b/drivers/net/ethernet/cavium/liquidio/request_manager.c
@@ -237,8 +237,10 @@ int octeon_setup_iq(struct octeon_device *oct,
237 } 237 }
238 238
239 oct->num_iqs++; 239 oct->num_iqs++;
240 if (oct->fn_list.enable_io_queues(oct)) 240 if (oct->fn_list.enable_io_queues(oct)) {
241 octeon_delete_instr_queue(oct, iq_no);
241 return 1; 242 return 1;
243 }
242 244
243 return 0; 245 return 0;
244} 246}
diff --git a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
index ad22554857bf..acb016834f04 100644
--- a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
+++ b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
@@ -1381,24 +1381,18 @@ static int acpi_get_mac_address(struct device *dev, struct acpi_device *adev,
1381 u8 *dst) 1381 u8 *dst)
1382{ 1382{
1383 u8 mac[ETH_ALEN]; 1383 u8 mac[ETH_ALEN];
1384 int ret; 1384 u8 *addr;
1385 1385
1386 ret = fwnode_property_read_u8_array(acpi_fwnode_handle(adev), 1386 addr = fwnode_get_mac_address(acpi_fwnode_handle(adev), mac, ETH_ALEN);
1387 "mac-address", mac, ETH_ALEN); 1387 if (!addr) {
1388 if (ret)
1389 goto out;
1390
1391 if (!is_valid_ether_addr(mac)) {
1392 dev_err(dev, "MAC address invalid: %pM\n", mac); 1388 dev_err(dev, "MAC address invalid: %pM\n", mac);
1393 ret = -EINVAL; 1389 return -EINVAL;
1394 goto out;
1395 } 1390 }
1396 1391
1397 dev_info(dev, "MAC address set to: %pM\n", mac); 1392 dev_info(dev, "MAC address set to: %pM\n", mac);
1398 1393
1399 memcpy(dst, mac, ETH_ALEN); 1394 ether_addr_copy(dst, mac);
1400out: 1395 return 0;
1401 return ret;
1402} 1396}
1403 1397
1404/* Currently only sets the MAC address. */ 1398/* Currently only sets the MAC address. */
diff --git a/drivers/net/ethernet/chelsio/cxgb/my3126.c b/drivers/net/ethernet/chelsio/cxgb/my3126.c
index 20c09cc4b323..60aa45b375b6 100644
--- a/drivers/net/ethernet/chelsio/cxgb/my3126.c
+++ b/drivers/net/ethernet/chelsio/cxgb/my3126.c
@@ -94,7 +94,7 @@ static int my3126_interrupt_handler(struct cphy *cphy)
94 return cphy_cause_link_change; 94 return cphy_cause_link_change;
95} 95}
96 96
97static void my3216_poll(struct work_struct *work) 97static void my3126_poll(struct work_struct *work)
98{ 98{
99 struct cphy *cphy = container_of(work, struct cphy, phy_update.work); 99 struct cphy *cphy = container_of(work, struct cphy, phy_update.work);
100 100
@@ -177,7 +177,7 @@ static struct cphy *my3126_phy_create(struct net_device *dev,
177 return NULL; 177 return NULL;
178 178
179 cphy_init(cphy, dev, phy_addr, &my3126_ops, mdio_ops); 179 cphy_init(cphy, dev, phy_addr, &my3126_ops, mdio_ops);
180 INIT_DELAYED_WORK(&cphy->phy_update, my3216_poll); 180 INIT_DELAYED_WORK(&cphy->phy_update, my3126_poll);
181 cphy->bmsr = 0; 181 cphy->bmsr = 0;
182 182
183 return cphy; 183 return cphy;
diff --git a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
index 1e82b9efe447..58f89f6a040f 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
@@ -3269,7 +3269,7 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
3269 if (!adapter->regs) { 3269 if (!adapter->regs) {
3270 dev_err(&pdev->dev, "cannot map device registers\n"); 3270 dev_err(&pdev->dev, "cannot map device registers\n");
3271 err = -ENOMEM; 3271 err = -ENOMEM;
3272 goto out_free_adapter; 3272 goto out_free_adapter_nofail;
3273 } 3273 }
3274 3274
3275 adapter->pdev = pdev; 3275 adapter->pdev = pdev;
@@ -3397,6 +3397,9 @@ out_free_dev:
3397 if (adapter->port[i]) 3397 if (adapter->port[i])
3398 free_netdev(adapter->port[i]); 3398 free_netdev(adapter->port[i]);
3399 3399
3400out_free_adapter_nofail:
3401 kfree_skb(adapter->nofail_skb);
3402
3400out_free_adapter: 3403out_free_adapter:
3401 kfree(adapter); 3404 kfree(adapter);
3402 3405
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
index 02959035ed3f..d692251ee252 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
@@ -3236,8 +3236,10 @@ static ssize_t blocked_fl_write(struct file *filp, const char __user *ubuf,
3236 return -ENOMEM; 3236 return -ENOMEM;
3237 3237
3238 err = bitmap_parse_user(ubuf, count, t, adap->sge.egr_sz); 3238 err = bitmap_parse_user(ubuf, count, t, adap->sge.egr_sz);
3239 if (err) 3239 if (err) {
3240 kvfree(t);
3240 return err; 3241 return err;
3242 }
3241 3243
3242 bitmap_copy(adap->sge.blocked_fl, t, adap->sge.egr_sz); 3244 bitmap_copy(adap->sge.blocked_fl, t, adap->sge.egr_sz);
3243 kvfree(t); 3245 kvfree(t);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
index 67202b6f352e..4311ad9c84b2 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
@@ -5561,7 +5561,6 @@ static int cxgb4_iov_configure(struct pci_dev *pdev, int num_vfs)
5561 char name[IFNAMSIZ]; 5561 char name[IFNAMSIZ];
5562 u32 devcap2; 5562 u32 devcap2;
5563 u16 flags; 5563 u16 flags;
5564 int pos;
5565 5564
5566 /* If we want to instantiate Virtual Functions, then our 5565 /* If we want to instantiate Virtual Functions, then our
5567 * parent bridge's PCI-E needs to support Alternative Routing 5566 * parent bridge's PCI-E needs to support Alternative Routing
@@ -5569,9 +5568,8 @@ static int cxgb4_iov_configure(struct pci_dev *pdev, int num_vfs)
5569 * and above. 5568 * and above.
5570 */ 5569 */
5571 pbridge = pdev->bus->self; 5570 pbridge = pdev->bus->self;
5572 pos = pci_find_capability(pbridge, PCI_CAP_ID_EXP); 5571 pcie_capability_read_word(pbridge, PCI_EXP_FLAGS, &flags);
5573 pci_read_config_word(pbridge, pos + PCI_EXP_FLAGS, &flags); 5572 pcie_capability_read_dword(pbridge, PCI_EXP_DEVCAP2, &devcap2);
5574 pci_read_config_dword(pbridge, pos + PCI_EXP_DEVCAP2, &devcap2);
5575 5573
5576 if ((flags & PCI_EXP_FLAGS_VERS) < 2 || 5574 if ((flags & PCI_EXP_FLAGS_VERS) < 2 ||
5577 !(devcap2 & PCI_EXP_DEVCAP2_ARI)) { 5575 !(devcap2 & PCI_EXP_DEVCAP2_ARI)) {
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c
index 312599c6b35a..e447976bdd3e 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c
@@ -67,7 +67,8 @@ static struct ch_tc_pedit_fields pedits[] = {
67static struct ch_tc_flower_entry *allocate_flower_entry(void) 67static struct ch_tc_flower_entry *allocate_flower_entry(void)
68{ 68{
69 struct ch_tc_flower_entry *new = kzalloc(sizeof(*new), GFP_KERNEL); 69 struct ch_tc_flower_entry *new = kzalloc(sizeof(*new), GFP_KERNEL);
70 spin_lock_init(&new->lock); 70 if (new)
71 spin_lock_init(&new->lock);
71 return new; 72 return new;
72} 73}
73 74
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
index 9dd5ed9a2965..f7fc553356f2 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
@@ -7309,7 +7309,6 @@ int t4_fixup_host_params(struct adapter *adap, unsigned int page_size,
7309 } else { 7309 } else {
7310 unsigned int pack_align; 7310 unsigned int pack_align;
7311 unsigned int ingpad, ingpack; 7311 unsigned int ingpad, ingpack;
7312 unsigned int pcie_cap;
7313 7312
7314 /* T5 introduced the separation of the Free List Padding and 7313 /* T5 introduced the separation of the Free List Padding and
7315 * Packing Boundaries. Thus, we can select a smaller Padding 7314 * Packing Boundaries. Thus, we can select a smaller Padding
@@ -7334,8 +7333,7 @@ int t4_fixup_host_params(struct adapter *adap, unsigned int page_size,
7334 * multiple of the Maximum Payload Size. 7333 * multiple of the Maximum Payload Size.
7335 */ 7334 */
7336 pack_align = fl_align; 7335 pack_align = fl_align;
7337 pcie_cap = pci_find_capability(adap->pdev, PCI_CAP_ID_EXP); 7336 if (pci_is_pcie(adap->pdev)) {
7338 if (pcie_cap) {
7339 unsigned int mps, mps_log; 7337 unsigned int mps, mps_log;
7340 u16 devctl; 7338 u16 devctl;
7341 7339
@@ -7343,9 +7341,8 @@ int t4_fixup_host_params(struct adapter *adap, unsigned int page_size,
7343 * [bits 7:5] encodes sizes as powers of 2 starting at 7341 * [bits 7:5] encodes sizes as powers of 2 starting at
7344 * 128 bytes. 7342 * 128 bytes.
7345 */ 7343 */
7346 pci_read_config_word(adap->pdev, 7344 pcie_capability_read_word(adap->pdev, PCI_EXP_DEVCTL,
7347 pcie_cap + PCI_EXP_DEVCTL, 7345 &devctl);
7348 &devctl);
7349 mps_log = ((devctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5) + 7; 7346 mps_log = ((devctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5) + 7;
7350 mps = 1 << mps_log; 7347 mps = 1 << mps_log;
7351 if (mps > pack_align) 7348 if (mps > pack_align)
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.c b/drivers/net/ethernet/emulex/benet/be_cmds.c
index ef5d61d57597..323976c811e9 100644
--- a/drivers/net/ethernet/emulex/benet/be_cmds.c
+++ b/drivers/net/ethernet/emulex/benet/be_cmds.c
@@ -550,7 +550,7 @@ int be_process_mcc(struct be_adapter *adapter)
550 int num = 0, status = 0; 550 int num = 0, status = 0;
551 struct be_mcc_obj *mcc_obj = &adapter->mcc_obj; 551 struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
552 552
553 spin_lock(&adapter->mcc_cq_lock); 553 spin_lock_bh(&adapter->mcc_cq_lock);
554 554
555 while ((compl = be_mcc_compl_get(adapter))) { 555 while ((compl = be_mcc_compl_get(adapter))) {
556 if (compl->flags & CQE_FLAGS_ASYNC_MASK) { 556 if (compl->flags & CQE_FLAGS_ASYNC_MASK) {
@@ -566,7 +566,7 @@ int be_process_mcc(struct be_adapter *adapter)
566 if (num) 566 if (num)
567 be_cq_notify(adapter, mcc_obj->cq.id, mcc_obj->rearm_cq, num); 567 be_cq_notify(adapter, mcc_obj->cq.id, mcc_obj->rearm_cq, num);
568 568
569 spin_unlock(&adapter->mcc_cq_lock); 569 spin_unlock_bh(&adapter->mcc_cq_lock);
570 return status; 570 return status;
571} 571}
572 572
@@ -581,9 +581,7 @@ static int be_mcc_wait_compl(struct be_adapter *adapter)
581 if (be_check_error(adapter, BE_ERROR_ANY)) 581 if (be_check_error(adapter, BE_ERROR_ANY))
582 return -EIO; 582 return -EIO;
583 583
584 local_bh_disable();
585 status = be_process_mcc(adapter); 584 status = be_process_mcc(adapter);
586 local_bh_enable();
587 585
588 if (atomic_read(&mcc_obj->q.used) == 0) 586 if (atomic_read(&mcc_obj->q.used) == 0)
589 break; 587 break;
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index b7a246b33599..4d8e40ac66d2 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -4698,8 +4698,13 @@ int be_update_queues(struct be_adapter *adapter)
4698 int status; 4698 int status;
4699 4699
4700 if (netif_running(netdev)) { 4700 if (netif_running(netdev)) {
4701 /* be_tx_timeout() must not run concurrently with this
4702 * function, synchronize with an already-running dev_watchdog
4703 */
4704 netif_tx_lock_bh(netdev);
4701 /* device cannot transmit now, avoid dev_watchdog timeouts */ 4705 /* device cannot transmit now, avoid dev_watchdog timeouts */
4702 netif_carrier_off(netdev); 4706 netif_carrier_off(netdev);
4707 netif_tx_unlock_bh(netdev);
4703 4708
4704 be_close(netdev); 4709 be_close(netdev);
4705 } 4710 }
@@ -5625,9 +5630,7 @@ static void be_worker(struct work_struct *work)
5625 * mcc completions 5630 * mcc completions
5626 */ 5631 */
5627 if (!netif_running(adapter->netdev)) { 5632 if (!netif_running(adapter->netdev)) {
5628 local_bh_disable();
5629 be_process_mcc(adapter); 5633 be_process_mcc(adapter);
5630 local_bh_enable();
5631 goto reschedule; 5634 goto reschedule;
5632 } 5635 }
5633 5636
diff --git a/drivers/net/ethernet/ezchip/nps_enet.h b/drivers/net/ethernet/ezchip/nps_enet.h
index 133acca0bf31..092da2d90026 100644
--- a/drivers/net/ethernet/ezchip/nps_enet.h
+++ b/drivers/net/ethernet/ezchip/nps_enet.h
@@ -167,7 +167,7 @@ struct nps_enet_priv {
167}; 167};
168 168
169/** 169/**
170 * nps_reg_set - Sets ENET register with provided value. 170 * nps_enet_reg_set - Sets ENET register with provided value.
171 * @priv: Pointer to EZchip ENET private data structure. 171 * @priv: Pointer to EZchip ENET private data structure.
172 * @reg: Register offset from base address. 172 * @reg: Register offset from base address.
173 * @value: Value to set in register. 173 * @value: Value to set in register.
@@ -179,7 +179,7 @@ static inline void nps_enet_reg_set(struct nps_enet_priv *priv,
179} 179}
180 180
181/** 181/**
182 * nps_reg_get - Gets value of specified ENET register. 182 * nps_enet_reg_get - Gets value of specified ENET register.
183 * @priv: Pointer to EZchip ENET private data structure. 183 * @priv: Pointer to EZchip ENET private data structure.
184 * @reg: Register offset from base address. 184 * @reg: Register offset from base address.
185 * 185 *
diff --git a/drivers/net/ethernet/freescale/enetc/Kconfig b/drivers/net/ethernet/freescale/enetc/Kconfig
index ed0d010c7cf2..04a59db03f2b 100644
--- a/drivers/net/ethernet/freescale/enetc/Kconfig
+++ b/drivers/net/ethernet/freescale/enetc/Kconfig
@@ -2,6 +2,7 @@
2config FSL_ENETC 2config FSL_ENETC
3 tristate "ENETC PF driver" 3 tristate "ENETC PF driver"
4 depends on PCI && PCI_MSI && (ARCH_LAYERSCAPE || COMPILE_TEST) 4 depends on PCI && PCI_MSI && (ARCH_LAYERSCAPE || COMPILE_TEST)
5 select PHYLIB
5 help 6 help
6 This driver supports NXP ENETC gigabit ethernet controller PCIe 7 This driver supports NXP ENETC gigabit ethernet controller PCIe
7 physical function (PF) devices, managing ENETC Ports at a privileged 8 physical function (PF) devices, managing ENETC Ports at a privileged
@@ -12,6 +13,7 @@ config FSL_ENETC
12config FSL_ENETC_VF 13config FSL_ENETC_VF
13 tristate "ENETC VF driver" 14 tristate "ENETC VF driver"
14 depends on PCI && PCI_MSI && (ARCH_LAYERSCAPE || COMPILE_TEST) 15 depends on PCI && PCI_MSI && (ARCH_LAYERSCAPE || COMPILE_TEST)
16 select PHYLIB
15 help 17 help
16 This driver supports NXP ENETC gigabit ethernet controller PCIe 18 This driver supports NXP ENETC gigabit ethernet controller PCIe
17 virtual function (VF) devices enabled by the ENETC PF driver. 19 virtual function (VF) devices enabled by the ENETC PF driver.
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_ptp.c b/drivers/net/ethernet/freescale/enetc/enetc_ptp.c
index 2fd2586e42bf..bc594892507a 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc_ptp.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc_ptp.c
@@ -82,7 +82,7 @@ static int enetc_ptp_probe(struct pci_dev *pdev,
82 n = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_MSIX); 82 n = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_MSIX);
83 if (n != 1) { 83 if (n != 1) {
84 err = -EPERM; 84 err = -EPERM;
85 goto err_irq; 85 goto err_irq_vectors;
86 } 86 }
87 87
88 ptp_qoriq->irq = pci_irq_vector(pdev, 0); 88 ptp_qoriq->irq = pci_irq_vector(pdev, 0);
@@ -107,6 +107,8 @@ static int enetc_ptp_probe(struct pci_dev *pdev,
107err_no_clock: 107err_no_clock:
108 free_irq(ptp_qoriq->irq, ptp_qoriq); 108 free_irq(ptp_qoriq->irq, ptp_qoriq);
109err_irq: 109err_irq:
110 pci_free_irq_vectors(pdev);
111err_irq_vectors:
110 iounmap(base); 112 iounmap(base);
111err_ioremap: 113err_ioremap:
112 kfree(ptp_qoriq); 114 kfree(ptp_qoriq);
@@ -125,6 +127,7 @@ static void enetc_ptp_remove(struct pci_dev *pdev)
125 127
126 enetc_phc_index = -1; 128 enetc_phc_index = -1;
127 ptp_qoriq_free(ptp_qoriq); 129 ptp_qoriq_free(ptp_qoriq);
130 pci_free_irq_vectors(pdev);
128 kfree(ptp_qoriq); 131 kfree(ptp_qoriq);
129 132
130 pci_release_mem_regions(pdev); 133 pci_release_mem_regions(pdev);
diff --git a/drivers/net/ethernet/freescale/fman/fman.c b/drivers/net/ethernet/freescale/fman/fman.c
index e80fedb27cee..210749bf1eac 100644
--- a/drivers/net/ethernet/freescale/fman/fman.c
+++ b/drivers/net/ethernet/freescale/fman/fman.c
@@ -2439,9 +2439,6 @@ MODULE_PARM_DESC(fsl_fm_rx_extra_headroom, "Extra headroom for Rx buffers");
2439 * buffers when not using jumbo frames. 2439 * buffers when not using jumbo frames.
2440 * Must be large enough to accommodate the network MTU, but small enough 2440 * Must be large enough to accommodate the network MTU, but small enough
2441 * to avoid wasting skb memory. 2441 * to avoid wasting skb memory.
2442 *
2443 * Could be overridden once, at boot-time, via the
2444 * fm_set_max_frm() callback.
2445 */ 2442 */
2446static int fsl_fm_max_frm = FSL_FM_MAX_FRAME_SIZE; 2443static int fsl_fm_max_frm = FSL_FM_MAX_FRAME_SIZE;
2447module_param(fsl_fm_max_frm, int, 0); 2444module_param(fsl_fm_max_frm, int, 0);
diff --git a/drivers/net/ethernet/google/gve/gve.h b/drivers/net/ethernet/google/gve/gve.h
index 92372dc43be8..ebc37e256922 100644
--- a/drivers/net/ethernet/google/gve/gve.h
+++ b/drivers/net/ethernet/google/gve/gve.h
@@ -31,9 +31,6 @@
31struct gve_rx_desc_queue { 31struct gve_rx_desc_queue {
32 struct gve_rx_desc *desc_ring; /* the descriptor ring */ 32 struct gve_rx_desc *desc_ring; /* the descriptor ring */
33 dma_addr_t bus; /* the bus for the desc_ring */ 33 dma_addr_t bus; /* the bus for the desc_ring */
34 u32 cnt; /* free-running total number of completed packets */
35 u32 fill_cnt; /* free-running total number of descriptors posted */
36 u32 mask; /* masks the cnt to the size of the ring */
37 u8 seqno; /* the next expected seqno for this desc*/ 34 u8 seqno; /* the next expected seqno for this desc*/
38}; 35};
39 36
@@ -60,8 +57,6 @@ struct gve_rx_data_queue {
60 dma_addr_t data_bus; /* dma mapping of the slots */ 57 dma_addr_t data_bus; /* dma mapping of the slots */
61 struct gve_rx_slot_page_info *page_info; /* page info of the buffers */ 58 struct gve_rx_slot_page_info *page_info; /* page info of the buffers */
62 struct gve_queue_page_list *qpl; /* qpl assigned to this queue */ 59 struct gve_queue_page_list *qpl; /* qpl assigned to this queue */
63 u32 mask; /* masks the cnt to the size of the ring */
64 u32 cnt; /* free-running total number of completed packets */
65}; 60};
66 61
67struct gve_priv; 62struct gve_priv;
@@ -73,6 +68,9 @@ struct gve_rx_ring {
73 struct gve_rx_data_queue data; 68 struct gve_rx_data_queue data;
74 u64 rbytes; /* free-running bytes received */ 69 u64 rbytes; /* free-running bytes received */
75 u64 rpackets; /* free-running packets received */ 70 u64 rpackets; /* free-running packets received */
71 u32 cnt; /* free-running total number of completed packets */
72 u32 fill_cnt; /* free-running total number of descs and buffs posted */
73 u32 mask; /* masks the cnt and fill_cnt to the size of the ring */
76 u32 q_num; /* queue index */ 74 u32 q_num; /* queue index */
77 u32 ntfy_id; /* notification block index */ 75 u32 ntfy_id; /* notification block index */
78 struct gve_queue_resources *q_resources; /* head and tail pointer idx */ 76 struct gve_queue_resources *q_resources; /* head and tail pointer idx */
diff --git a/drivers/net/ethernet/google/gve/gve_ethtool.c b/drivers/net/ethernet/google/gve/gve_ethtool.c
index 26540b856541..d8fa816f4473 100644
--- a/drivers/net/ethernet/google/gve/gve_ethtool.c
+++ b/drivers/net/ethernet/google/gve/gve_ethtool.c
@@ -138,8 +138,8 @@ gve_get_ethtool_stats(struct net_device *netdev,
138 for (ring = 0; ring < priv->rx_cfg.num_queues; ring++) { 138 for (ring = 0; ring < priv->rx_cfg.num_queues; ring++) {
139 struct gve_rx_ring *rx = &priv->rx[ring]; 139 struct gve_rx_ring *rx = &priv->rx[ring];
140 140
141 data[i++] = rx->desc.cnt; 141 data[i++] = rx->cnt;
142 data[i++] = rx->desc.fill_cnt; 142 data[i++] = rx->fill_cnt;
143 } 143 }
144 } else { 144 } else {
145 i += priv->rx_cfg.num_queues * NUM_GVE_RX_CNTS; 145 i += priv->rx_cfg.num_queues * NUM_GVE_RX_CNTS;
diff --git a/drivers/net/ethernet/google/gve/gve_main.c b/drivers/net/ethernet/google/gve/gve_main.c
index 497298752381..aca95f64bde8 100644
--- a/drivers/net/ethernet/google/gve/gve_main.c
+++ b/drivers/net/ethernet/google/gve/gve_main.c
@@ -50,7 +50,7 @@ static void gve_get_stats(struct net_device *dev, struct rtnl_link_stats64 *s)
50 u64_stats_fetch_begin(&priv->tx[ring].statss); 50 u64_stats_fetch_begin(&priv->tx[ring].statss);
51 s->tx_packets += priv->tx[ring].pkt_done; 51 s->tx_packets += priv->tx[ring].pkt_done;
52 s->tx_bytes += priv->tx[ring].bytes_done; 52 s->tx_bytes += priv->tx[ring].bytes_done;
53 } while (u64_stats_fetch_retry(&priv->rx[ring].statss, 53 } while (u64_stats_fetch_retry(&priv->tx[ring].statss,
54 start)); 54 start));
55 } 55 }
56 } 56 }
diff --git a/drivers/net/ethernet/google/gve/gve_rx.c b/drivers/net/ethernet/google/gve/gve_rx.c
index 1914b8350da7..59564ac99d2a 100644
--- a/drivers/net/ethernet/google/gve/gve_rx.c
+++ b/drivers/net/ethernet/google/gve/gve_rx.c
@@ -37,7 +37,7 @@ static void gve_rx_free_ring(struct gve_priv *priv, int idx)
37 rx->data.qpl = NULL; 37 rx->data.qpl = NULL;
38 kvfree(rx->data.page_info); 38 kvfree(rx->data.page_info);
39 39
40 slots = rx->data.mask + 1; 40 slots = rx->mask + 1;
41 bytes = sizeof(*rx->data.data_ring) * slots; 41 bytes = sizeof(*rx->data.data_ring) * slots;
42 dma_free_coherent(dev, bytes, rx->data.data_ring, 42 dma_free_coherent(dev, bytes, rx->data.data_ring,
43 rx->data.data_bus); 43 rx->data.data_bus);
@@ -64,7 +64,7 @@ static int gve_prefill_rx_pages(struct gve_rx_ring *rx)
64 /* Allocate one page per Rx queue slot. Each page is split into two 64 /* Allocate one page per Rx queue slot. Each page is split into two
65 * packet buffers, when possible we "page flip" between the two. 65 * packet buffers, when possible we "page flip" between the two.
66 */ 66 */
67 slots = rx->data.mask + 1; 67 slots = rx->mask + 1;
68 68
69 rx->data.page_info = kvzalloc(slots * 69 rx->data.page_info = kvzalloc(slots *
70 sizeof(*rx->data.page_info), GFP_KERNEL); 70 sizeof(*rx->data.page_info), GFP_KERNEL);
@@ -111,7 +111,7 @@ static int gve_rx_alloc_ring(struct gve_priv *priv, int idx)
111 rx->q_num = idx; 111 rx->q_num = idx;
112 112
113 slots = priv->rx_pages_per_qpl; 113 slots = priv->rx_pages_per_qpl;
114 rx->data.mask = slots - 1; 114 rx->mask = slots - 1;
115 115
116 /* alloc rx data ring */ 116 /* alloc rx data ring */
117 bytes = sizeof(*rx->data.data_ring) * slots; 117 bytes = sizeof(*rx->data.data_ring) * slots;
@@ -125,7 +125,7 @@ static int gve_rx_alloc_ring(struct gve_priv *priv, int idx)
125 err = -ENOMEM; 125 err = -ENOMEM;
126 goto abort_with_slots; 126 goto abort_with_slots;
127 } 127 }
128 rx->desc.fill_cnt = filled_pages; 128 rx->fill_cnt = filled_pages;
129 /* Ensure data ring slots (packet buffers) are visible. */ 129 /* Ensure data ring slots (packet buffers) are visible. */
130 dma_wmb(); 130 dma_wmb();
131 131
@@ -156,8 +156,8 @@ static int gve_rx_alloc_ring(struct gve_priv *priv, int idx)
156 err = -ENOMEM; 156 err = -ENOMEM;
157 goto abort_with_q_resources; 157 goto abort_with_q_resources;
158 } 158 }
159 rx->desc.mask = slots - 1; 159 rx->mask = slots - 1;
160 rx->desc.cnt = 0; 160 rx->cnt = 0;
161 rx->desc.seqno = 1; 161 rx->desc.seqno = 1;
162 gve_rx_add_to_block(priv, idx); 162 gve_rx_add_to_block(priv, idx);
163 163
@@ -213,7 +213,7 @@ void gve_rx_write_doorbell(struct gve_priv *priv, struct gve_rx_ring *rx)
213{ 213{
214 u32 db_idx = be32_to_cpu(rx->q_resources->db_index); 214 u32 db_idx = be32_to_cpu(rx->q_resources->db_index);
215 215
216 iowrite32be(rx->desc.fill_cnt, &priv->db_bar2[db_idx]); 216 iowrite32be(rx->fill_cnt, &priv->db_bar2[db_idx]);
217} 217}
218 218
219static enum pkt_hash_types gve_rss_type(__be16 pkt_flags) 219static enum pkt_hash_types gve_rss_type(__be16 pkt_flags)
@@ -273,7 +273,7 @@ static void gve_rx_flip_buff(struct gve_rx_slot_page_info *page_info,
273} 273}
274 274
275static bool gve_rx(struct gve_rx_ring *rx, struct gve_rx_desc *rx_desc, 275static bool gve_rx(struct gve_rx_ring *rx, struct gve_rx_desc *rx_desc,
276 netdev_features_t feat) 276 netdev_features_t feat, u32 idx)
277{ 277{
278 struct gve_rx_slot_page_info *page_info; 278 struct gve_rx_slot_page_info *page_info;
279 struct gve_priv *priv = rx->gve; 279 struct gve_priv *priv = rx->gve;
@@ -282,14 +282,12 @@ static bool gve_rx(struct gve_rx_ring *rx, struct gve_rx_desc *rx_desc,
282 struct sk_buff *skb; 282 struct sk_buff *skb;
283 int pagecount; 283 int pagecount;
284 u16 len; 284 u16 len;
285 u32 idx;
286 285
287 /* drop this packet */ 286 /* drop this packet */
288 if (unlikely(rx_desc->flags_seq & GVE_RXF_ERR)) 287 if (unlikely(rx_desc->flags_seq & GVE_RXF_ERR))
289 return true; 288 return true;
290 289
291 len = be16_to_cpu(rx_desc->len) - GVE_RX_PAD; 290 len = be16_to_cpu(rx_desc->len) - GVE_RX_PAD;
292 idx = rx->data.cnt & rx->data.mask;
293 page_info = &rx->data.page_info[idx]; 291 page_info = &rx->data.page_info[idx];
294 292
295 /* gvnic can only receive into registered segments. If the buffer 293 /* gvnic can only receive into registered segments. If the buffer
@@ -340,8 +338,6 @@ have_skb:
340 if (!skb) 338 if (!skb)
341 return true; 339 return true;
342 340
343 rx->data.cnt++;
344
345 if (likely(feat & NETIF_F_RXCSUM)) { 341 if (likely(feat & NETIF_F_RXCSUM)) {
346 /* NIC passes up the partial sum */ 342 /* NIC passes up the partial sum */
347 if (rx_desc->csum) 343 if (rx_desc->csum)
@@ -370,7 +366,7 @@ static bool gve_rx_work_pending(struct gve_rx_ring *rx)
370 __be16 flags_seq; 366 __be16 flags_seq;
371 u32 next_idx; 367 u32 next_idx;
372 368
373 next_idx = rx->desc.cnt & rx->desc.mask; 369 next_idx = rx->cnt & rx->mask;
374 desc = rx->desc.desc_ring + next_idx; 370 desc = rx->desc.desc_ring + next_idx;
375 371
376 flags_seq = desc->flags_seq; 372 flags_seq = desc->flags_seq;
@@ -385,8 +381,8 @@ bool gve_clean_rx_done(struct gve_rx_ring *rx, int budget,
385{ 381{
386 struct gve_priv *priv = rx->gve; 382 struct gve_priv *priv = rx->gve;
387 struct gve_rx_desc *desc; 383 struct gve_rx_desc *desc;
388 u32 cnt = rx->desc.cnt; 384 u32 cnt = rx->cnt;
389 u32 idx = cnt & rx->desc.mask; 385 u32 idx = cnt & rx->mask;
390 u32 work_done = 0; 386 u32 work_done = 0;
391 u64 bytes = 0; 387 u64 bytes = 0;
392 388
@@ -401,10 +397,10 @@ bool gve_clean_rx_done(struct gve_rx_ring *rx, int budget,
401 rx->q_num, GVE_SEQNO(desc->flags_seq), 397 rx->q_num, GVE_SEQNO(desc->flags_seq),
402 rx->desc.seqno); 398 rx->desc.seqno);
403 bytes += be16_to_cpu(desc->len) - GVE_RX_PAD; 399 bytes += be16_to_cpu(desc->len) - GVE_RX_PAD;
404 if (!gve_rx(rx, desc, feat)) 400 if (!gve_rx(rx, desc, feat, idx))
405 gve_schedule_reset(priv); 401 gve_schedule_reset(priv);
406 cnt++; 402 cnt++;
407 idx = cnt & rx->desc.mask; 403 idx = cnt & rx->mask;
408 desc = rx->desc.desc_ring + idx; 404 desc = rx->desc.desc_ring + idx;
409 rx->desc.seqno = gve_next_seqno(rx->desc.seqno); 405 rx->desc.seqno = gve_next_seqno(rx->desc.seqno);
410 work_done++; 406 work_done++;
@@ -417,8 +413,8 @@ bool gve_clean_rx_done(struct gve_rx_ring *rx, int budget,
417 rx->rpackets += work_done; 413 rx->rpackets += work_done;
418 rx->rbytes += bytes; 414 rx->rbytes += bytes;
419 u64_stats_update_end(&rx->statss); 415 u64_stats_update_end(&rx->statss);
420 rx->desc.cnt = cnt; 416 rx->cnt = cnt;
421 rx->desc.fill_cnt += work_done; 417 rx->fill_cnt += work_done;
422 418
423 /* restock desc ring slots */ 419 /* restock desc ring slots */
424 dma_wmb(); /* Ensure descs are visible before ringing doorbell */ 420 dma_wmb(); /* Ensure descs are visible before ringing doorbell */
diff --git a/drivers/net/ethernet/hisilicon/hip04_eth.c b/drivers/net/ethernet/hisilicon/hip04_eth.c
index d60452845539..c84167447abe 100644
--- a/drivers/net/ethernet/hisilicon/hip04_eth.c
+++ b/drivers/net/ethernet/hisilicon/hip04_eth.c
@@ -220,6 +220,7 @@ struct hip04_priv {
220 unsigned int reg_inten; 220 unsigned int reg_inten;
221 221
222 struct napi_struct napi; 222 struct napi_struct napi;
223 struct device *dev;
223 struct net_device *ndev; 224 struct net_device *ndev;
224 225
225 struct tx_desc *tx_desc; 226 struct tx_desc *tx_desc;
@@ -248,7 +249,7 @@ struct hip04_priv {
248 249
249static inline unsigned int tx_count(unsigned int head, unsigned int tail) 250static inline unsigned int tx_count(unsigned int head, unsigned int tail)
250{ 251{
251 return (head - tail) % (TX_DESC_NUM - 1); 252 return (head - tail) % TX_DESC_NUM;
252} 253}
253 254
254static void hip04_config_port(struct net_device *ndev, u32 speed, u32 duplex) 255static void hip04_config_port(struct net_device *ndev, u32 speed, u32 duplex)
@@ -465,7 +466,7 @@ static int hip04_tx_reclaim(struct net_device *ndev, bool force)
465 } 466 }
466 467
467 if (priv->tx_phys[tx_tail]) { 468 if (priv->tx_phys[tx_tail]) {
468 dma_unmap_single(&ndev->dev, priv->tx_phys[tx_tail], 469 dma_unmap_single(priv->dev, priv->tx_phys[tx_tail],
469 priv->tx_skb[tx_tail]->len, 470 priv->tx_skb[tx_tail]->len,
470 DMA_TO_DEVICE); 471 DMA_TO_DEVICE);
471 priv->tx_phys[tx_tail] = 0; 472 priv->tx_phys[tx_tail] = 0;
@@ -516,8 +517,8 @@ hip04_mac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
516 return NETDEV_TX_BUSY; 517 return NETDEV_TX_BUSY;
517 } 518 }
518 519
519 phys = dma_map_single(&ndev->dev, skb->data, skb->len, DMA_TO_DEVICE); 520 phys = dma_map_single(priv->dev, skb->data, skb->len, DMA_TO_DEVICE);
520 if (dma_mapping_error(&ndev->dev, phys)) { 521 if (dma_mapping_error(priv->dev, phys)) {
521 dev_kfree_skb(skb); 522 dev_kfree_skb(skb);
522 return NETDEV_TX_OK; 523 return NETDEV_TX_OK;
523 } 524 }
@@ -585,6 +586,9 @@ static int hip04_rx_poll(struct napi_struct *napi, int budget)
585 u16 len; 586 u16 len;
586 u32 err; 587 u32 err;
587 588
589 /* clean up tx descriptors */
590 tx_remaining = hip04_tx_reclaim(ndev, false);
591
588 while (cnt && !last) { 592 while (cnt && !last) {
589 buf = priv->rx_buf[priv->rx_head]; 593 buf = priv->rx_buf[priv->rx_head];
590 skb = build_skb(buf, priv->rx_buf_size); 594 skb = build_skb(buf, priv->rx_buf_size);
@@ -593,7 +597,7 @@ static int hip04_rx_poll(struct napi_struct *napi, int budget)
593 goto refill; 597 goto refill;
594 } 598 }
595 599
596 dma_unmap_single(&ndev->dev, priv->rx_phys[priv->rx_head], 600 dma_unmap_single(priv->dev, priv->rx_phys[priv->rx_head],
597 RX_BUF_SIZE, DMA_FROM_DEVICE); 601 RX_BUF_SIZE, DMA_FROM_DEVICE);
598 priv->rx_phys[priv->rx_head] = 0; 602 priv->rx_phys[priv->rx_head] = 0;
599 603
@@ -622,9 +626,9 @@ refill:
622 buf = netdev_alloc_frag(priv->rx_buf_size); 626 buf = netdev_alloc_frag(priv->rx_buf_size);
623 if (!buf) 627 if (!buf)
624 goto done; 628 goto done;
625 phys = dma_map_single(&ndev->dev, buf, 629 phys = dma_map_single(priv->dev, buf,
626 RX_BUF_SIZE, DMA_FROM_DEVICE); 630 RX_BUF_SIZE, DMA_FROM_DEVICE);
627 if (dma_mapping_error(&ndev->dev, phys)) 631 if (dma_mapping_error(priv->dev, phys))
628 goto done; 632 goto done;
629 priv->rx_buf[priv->rx_head] = buf; 633 priv->rx_buf[priv->rx_head] = buf;
630 priv->rx_phys[priv->rx_head] = phys; 634 priv->rx_phys[priv->rx_head] = phys;
@@ -645,8 +649,7 @@ refill:
645 } 649 }
646 napi_complete_done(napi, rx); 650 napi_complete_done(napi, rx);
647done: 651done:
648 /* clean up tx descriptors and start a new timer if necessary */ 652 /* start a new timer if necessary */
649 tx_remaining = hip04_tx_reclaim(ndev, false);
650 if (rx < budget && tx_remaining) 653 if (rx < budget && tx_remaining)
651 hip04_start_tx_timer(priv); 654 hip04_start_tx_timer(priv);
652 655
@@ -728,9 +731,9 @@ static int hip04_mac_open(struct net_device *ndev)
728 for (i = 0; i < RX_DESC_NUM; i++) { 731 for (i = 0; i < RX_DESC_NUM; i++) {
729 dma_addr_t phys; 732 dma_addr_t phys;
730 733
731 phys = dma_map_single(&ndev->dev, priv->rx_buf[i], 734 phys = dma_map_single(priv->dev, priv->rx_buf[i],
732 RX_BUF_SIZE, DMA_FROM_DEVICE); 735 RX_BUF_SIZE, DMA_FROM_DEVICE);
733 if (dma_mapping_error(&ndev->dev, phys)) 736 if (dma_mapping_error(priv->dev, phys))
734 return -EIO; 737 return -EIO;
735 738
736 priv->rx_phys[i] = phys; 739 priv->rx_phys[i] = phys;
@@ -764,7 +767,7 @@ static int hip04_mac_stop(struct net_device *ndev)
764 767
765 for (i = 0; i < RX_DESC_NUM; i++) { 768 for (i = 0; i < RX_DESC_NUM; i++) {
766 if (priv->rx_phys[i]) { 769 if (priv->rx_phys[i]) {
767 dma_unmap_single(&ndev->dev, priv->rx_phys[i], 770 dma_unmap_single(priv->dev, priv->rx_phys[i],
768 RX_BUF_SIZE, DMA_FROM_DEVICE); 771 RX_BUF_SIZE, DMA_FROM_DEVICE);
769 priv->rx_phys[i] = 0; 772 priv->rx_phys[i] = 0;
770 } 773 }
@@ -907,6 +910,7 @@ static int hip04_mac_probe(struct platform_device *pdev)
907 return -ENOMEM; 910 return -ENOMEM;
908 911
909 priv = netdev_priv(ndev); 912 priv = netdev_priv(ndev);
913 priv->dev = d;
910 priv->ndev = ndev; 914 priv->ndev = ndev;
911 platform_set_drvdata(pdev, ndev); 915 platform_set_drvdata(pdev, ndev);
912 SET_NETDEV_DEV(ndev, &pdev->dev); 916 SET_NETDEV_DEV(ndev, &pdev->dev);
diff --git a/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h b/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h
index 8ad5292eebbe..75329ab775a6 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h
@@ -43,7 +43,7 @@ enum HCLGE_MBX_OPCODE {
43 HCLGE_MBX_GET_QID_IN_PF, /* (VF -> PF) get queue id in pf */ 43 HCLGE_MBX_GET_QID_IN_PF, /* (VF -> PF) get queue id in pf */
44 HCLGE_MBX_LINK_STAT_MODE, /* (PF -> VF) link mode has changed */ 44 HCLGE_MBX_LINK_STAT_MODE, /* (PF -> VF) link mode has changed */
45 HCLGE_MBX_GET_LINK_MODE, /* (VF -> PF) get the link mode of pf */ 45 HCLGE_MBX_GET_LINK_MODE, /* (VF -> PF) get the link mode of pf */
46 HLCGE_MBX_PUSH_VLAN_INFO, /* (PF -> VF) push port base vlan */ 46 HCLGE_MBX_PUSH_VLAN_INFO, /* (PF -> VF) push port base vlan */
47 HCLGE_MBX_GET_MEDIA_TYPE, /* (VF -> PF) get media type */ 47 HCLGE_MBX_GET_MEDIA_TYPE, /* (VF -> PF) get media type */
48 48
49 HCLGE_MBX_GET_VF_FLR_STATUS = 200, /* (M7 -> PF) get vf reset status */ 49 HCLGE_MBX_GET_VF_FLR_STATUS = 200, /* (M7 -> PF) get vf reset status */
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
index a38ac7cfe16b..690b9990215c 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
@@ -304,7 +304,7 @@ int hclge_push_vf_port_base_vlan_info(struct hclge_vport *vport, u8 vfid,
304 memcpy(&msg_data[6], &vlan_tag, sizeof(u16)); 304 memcpy(&msg_data[6], &vlan_tag, sizeof(u16));
305 305
306 return hclge_send_mbx_msg(vport, msg_data, sizeof(msg_data), 306 return hclge_send_mbx_msg(vport, msg_data, sizeof(msg_data),
307 HLCGE_MBX_PUSH_VLAN_INFO, vfid); 307 HCLGE_MBX_PUSH_VLAN_INFO, vfid);
308} 308}
309 309
310static int hclge_set_vf_vlan_cfg(struct hclge_vport *vport, 310static int hclge_set_vf_vlan_cfg(struct hclge_vport *vport,
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c
index f60b80bd605e..6a96987bd8f0 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c
@@ -204,7 +204,7 @@ void hclgevf_mbx_handler(struct hclgevf_dev *hdev)
204 case HCLGE_MBX_LINK_STAT_CHANGE: 204 case HCLGE_MBX_LINK_STAT_CHANGE:
205 case HCLGE_MBX_ASSERTING_RESET: 205 case HCLGE_MBX_ASSERTING_RESET:
206 case HCLGE_MBX_LINK_STAT_MODE: 206 case HCLGE_MBX_LINK_STAT_MODE:
207 case HLCGE_MBX_PUSH_VLAN_INFO: 207 case HCLGE_MBX_PUSH_VLAN_INFO:
208 /* set this mbx event as pending. This is required as we 208 /* set this mbx event as pending. This is required as we
209 * might loose interrupt event when mbx task is busy 209 * might loose interrupt event when mbx task is busy
210 * handling. This shall be cleared when mbx task just 210 * handling. This shall be cleared when mbx task just
@@ -307,7 +307,7 @@ void hclgevf_mbx_async_handler(struct hclgevf_dev *hdev)
307 hclgevf_reset_task_schedule(hdev); 307 hclgevf_reset_task_schedule(hdev);
308 308
309 break; 309 break;
310 case HLCGE_MBX_PUSH_VLAN_INFO: 310 case HCLGE_MBX_PUSH_VLAN_INFO:
311 state = le16_to_cpu(msg_q[1]); 311 state = le16_to_cpu(msg_q[1]);
312 vlan_info = &msg_q[1]; 312 vlan_info = &msg_q[1];
313 hclgevf_update_port_base_vlan_info(hdev, state, 313 hclgevf_update_port_base_vlan_info(hdev, state,
diff --git a/drivers/net/ethernet/ibm/ehea/ehea_main.c b/drivers/net/ethernet/ibm/ehea/ehea_main.c
index 4138a8480347..cca71ba7a74a 100644
--- a/drivers/net/ethernet/ibm/ehea/ehea_main.c
+++ b/drivers/net/ethernet/ibm/ehea/ehea_main.c
@@ -3251,7 +3251,7 @@ static int ehea_mem_notifier(struct notifier_block *nb,
3251 switch (action) { 3251 switch (action) {
3252 case MEM_CANCEL_OFFLINE: 3252 case MEM_CANCEL_OFFLINE:
3253 pr_info("memory offlining canceled"); 3253 pr_info("memory offlining canceled");
3254 /* Fall through: re-add canceled memory block */ 3254 /* Fall through - re-add canceled memory block */
3255 3255
3256 case MEM_ONLINE: 3256 case MEM_ONLINE:
3257 pr_info("memory is going online"); 3257 pr_info("memory is going online");
diff --git a/drivers/net/ethernet/ibm/ibmveth.c b/drivers/net/ethernet/ibm/ibmveth.c
index d654c234aaf7..c5be4ebd8437 100644
--- a/drivers/net/ethernet/ibm/ibmveth.c
+++ b/drivers/net/ethernet/ibm/ibmveth.c
@@ -1605,7 +1605,7 @@ static int ibmveth_probe(struct vio_dev *dev, const struct vio_device_id *id)
1605 struct net_device *netdev; 1605 struct net_device *netdev;
1606 struct ibmveth_adapter *adapter; 1606 struct ibmveth_adapter *adapter;
1607 unsigned char *mac_addr_p; 1607 unsigned char *mac_addr_p;
1608 unsigned int *mcastFilterSize_p; 1608 __be32 *mcastFilterSize_p;
1609 long ret; 1609 long ret;
1610 unsigned long ret_attr; 1610 unsigned long ret_attr;
1611 1611
@@ -1627,8 +1627,9 @@ static int ibmveth_probe(struct vio_dev *dev, const struct vio_device_id *id)
1627 return -EINVAL; 1627 return -EINVAL;
1628 } 1628 }
1629 1629
1630 mcastFilterSize_p = (unsigned int *)vio_get_attribute(dev, 1630 mcastFilterSize_p = (__be32 *)vio_get_attribute(dev,
1631 VETH_MCAST_FILTER_SIZE, NULL); 1631 VETH_MCAST_FILTER_SIZE,
1632 NULL);
1632 if (!mcastFilterSize_p) { 1633 if (!mcastFilterSize_p) {
1633 dev_err(&dev->dev, "Can't find VETH_MCAST_FILTER_SIZE " 1634 dev_err(&dev->dev, "Can't find VETH_MCAST_FILTER_SIZE "
1634 "attribute\n"); 1635 "attribute\n");
@@ -1645,7 +1646,7 @@ static int ibmveth_probe(struct vio_dev *dev, const struct vio_device_id *id)
1645 1646
1646 adapter->vdev = dev; 1647 adapter->vdev = dev;
1647 adapter->netdev = netdev; 1648 adapter->netdev = netdev;
1648 adapter->mcastFilterSize = *mcastFilterSize_p; 1649 adapter->mcastFilterSize = be32_to_cpu(*mcastFilterSize_p);
1649 adapter->pool_config = 0; 1650 adapter->pool_config = 0;
1650 1651
1651 netif_napi_add(netdev, &adapter->napi, ibmveth_poll, 16); 1652 netif_napi_add(netdev, &adapter->napi, ibmveth_poll, 16);
diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
index 3da680073265..fa4bb940665c 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.c
+++ b/drivers/net/ethernet/ibm/ibmvnic.c
@@ -1568,6 +1568,8 @@ static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
1568 lpar_rc = send_subcrq_indirect(adapter, handle_array[queue_num], 1568 lpar_rc = send_subcrq_indirect(adapter, handle_array[queue_num],
1569 (u64)tx_buff->indir_dma, 1569 (u64)tx_buff->indir_dma,
1570 (u64)num_entries); 1570 (u64)num_entries);
1571 dma_unmap_single(dev, tx_buff->indir_dma,
1572 sizeof(tx_buff->indir_arr), DMA_TO_DEVICE);
1571 } else { 1573 } else {
1572 tx_buff->num_entries = num_entries; 1574 tx_buff->num_entries = num_entries;
1573 lpar_rc = send_subcrq(adapter, handle_array[queue_num], 1575 lpar_rc = send_subcrq(adapter, handle_array[queue_num],
@@ -1981,6 +1983,10 @@ static void __ibmvnic_reset(struct work_struct *work)
1981 1983
1982 rwi = get_next_rwi(adapter); 1984 rwi = get_next_rwi(adapter);
1983 while (rwi) { 1985 while (rwi) {
1986 if (adapter->state == VNIC_REMOVING ||
1987 adapter->state == VNIC_REMOVED)
1988 goto out;
1989
1984 if (adapter->force_reset_recovery) { 1990 if (adapter->force_reset_recovery) {
1985 adapter->force_reset_recovery = false; 1991 adapter->force_reset_recovery = false;
1986 rc = do_hard_reset(adapter, rwi, reset_state); 1992 rc = do_hard_reset(adapter, rwi, reset_state);
@@ -2005,7 +2011,7 @@ static void __ibmvnic_reset(struct work_struct *work)
2005 netdev_dbg(adapter->netdev, "Reset failed\n"); 2011 netdev_dbg(adapter->netdev, "Reset failed\n");
2006 free_all_rwi(adapter); 2012 free_all_rwi(adapter);
2007 } 2013 }
2008 2014out:
2009 adapter->resetting = false; 2015 adapter->resetting = false;
2010 if (we_lock_rtnl) 2016 if (we_lock_rtnl)
2011 rtnl_unlock(); 2017 rtnl_unlock();
@@ -2788,7 +2794,6 @@ static int ibmvnic_complete_tx(struct ibmvnic_adapter *adapter,
2788 union sub_crq *next; 2794 union sub_crq *next;
2789 int index; 2795 int index;
2790 int i, j; 2796 int i, j;
2791 u8 *first;
2792 2797
2793restart_loop: 2798restart_loop:
2794 while (pending_scrq(adapter, scrq)) { 2799 while (pending_scrq(adapter, scrq)) {
@@ -2818,14 +2823,6 @@ restart_loop:
2818 2823
2819 txbuff->data_dma[j] = 0; 2824 txbuff->data_dma[j] = 0;
2820 } 2825 }
2821 /* if sub_crq was sent indirectly */
2822 first = &txbuff->indir_arr[0].generic.first;
2823 if (*first == IBMVNIC_CRQ_CMD) {
2824 dma_unmap_single(dev, txbuff->indir_dma,
2825 sizeof(txbuff->indir_arr),
2826 DMA_TO_DEVICE);
2827 *first = 0;
2828 }
2829 2826
2830 if (txbuff->last_frag) { 2827 if (txbuff->last_frag) {
2831 dev_kfree_skb_any(txbuff->skb); 2828 dev_kfree_skb_any(txbuff->skb);
diff --git a/drivers/net/ethernet/intel/igc/igc_main.c b/drivers/net/ethernet/intel/igc/igc_main.c
index 93f3b4e6185b..aa9323e55406 100644
--- a/drivers/net/ethernet/intel/igc/igc_main.c
+++ b/drivers/net/ethernet/intel/igc/igc_main.c
@@ -3912,13 +3912,11 @@ void igc_write_pci_cfg(struct igc_hw *hw, u32 reg, u16 *value)
3912s32 igc_read_pcie_cap_reg(struct igc_hw *hw, u32 reg, u16 *value) 3912s32 igc_read_pcie_cap_reg(struct igc_hw *hw, u32 reg, u16 *value)
3913{ 3913{
3914 struct igc_adapter *adapter = hw->back; 3914 struct igc_adapter *adapter = hw->back;
3915 u16 cap_offset;
3916 3915
3917 cap_offset = pci_find_capability(adapter->pdev, PCI_CAP_ID_EXP); 3916 if (!pci_is_pcie(adapter->pdev))
3918 if (!cap_offset)
3919 return -IGC_ERR_CONFIG; 3917 return -IGC_ERR_CONFIG;
3920 3918
3921 pci_read_config_word(adapter->pdev, cap_offset + reg, value); 3919 pcie_capability_read_word(adapter->pdev, reg, value);
3922 3920
3923 return IGC_SUCCESS; 3921 return IGC_SUCCESS;
3924} 3922}
@@ -3926,13 +3924,11 @@ s32 igc_read_pcie_cap_reg(struct igc_hw *hw, u32 reg, u16 *value)
3926s32 igc_write_pcie_cap_reg(struct igc_hw *hw, u32 reg, u16 *value) 3924s32 igc_write_pcie_cap_reg(struct igc_hw *hw, u32 reg, u16 *value)
3927{ 3925{
3928 struct igc_adapter *adapter = hw->back; 3926 struct igc_adapter *adapter = hw->back;
3929 u16 cap_offset;
3930 3927
3931 cap_offset = pci_find_capability(adapter->pdev, PCI_CAP_ID_EXP); 3928 if (!pci_is_pcie(adapter->pdev))
3932 if (!cap_offset)
3933 return -IGC_ERR_CONFIG; 3929 return -IGC_ERR_CONFIG;
3934 3930
3935 pci_write_config_word(adapter->pdev, cap_offset + reg, *value); 3931 pcie_capability_write_word(adapter->pdev, reg, *value);
3936 3932
3937 return IGC_SUCCESS; 3933 return IGC_SUCCESS;
3938} 3934}
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index cbaf712d6529..7882148abb43 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -7897,11 +7897,8 @@ static void ixgbe_service_task(struct work_struct *work)
7897 return; 7897 return;
7898 } 7898 }
7899 if (ixgbe_check_fw_error(adapter)) { 7899 if (ixgbe_check_fw_error(adapter)) {
7900 if (!test_bit(__IXGBE_DOWN, &adapter->state)) { 7900 if (!test_bit(__IXGBE_DOWN, &adapter->state))
7901 rtnl_lock();
7902 unregister_netdev(adapter->netdev); 7901 unregister_netdev(adapter->netdev);
7903 rtnl_unlock();
7904 }
7905 ixgbe_service_event_complete(adapter); 7902 ixgbe_service_event_complete(adapter);
7906 return; 7903 return;
7907 } 7904 }
diff --git a/drivers/net/ethernet/marvell/mvmdio.c b/drivers/net/ethernet/marvell/mvmdio.c
index f660cc2b8258..0b9e851f3da4 100644
--- a/drivers/net/ethernet/marvell/mvmdio.c
+++ b/drivers/net/ethernet/marvell/mvmdio.c
@@ -319,20 +319,33 @@ static int orion_mdio_probe(struct platform_device *pdev)
319 319
320 init_waitqueue_head(&dev->smi_busy_wait); 320 init_waitqueue_head(&dev->smi_busy_wait);
321 321
322 for (i = 0; i < ARRAY_SIZE(dev->clk); i++) { 322 if (pdev->dev.of_node) {
323 dev->clk[i] = of_clk_get(pdev->dev.of_node, i); 323 for (i = 0; i < ARRAY_SIZE(dev->clk); i++) {
324 if (PTR_ERR(dev->clk[i]) == -EPROBE_DEFER) { 324 dev->clk[i] = of_clk_get(pdev->dev.of_node, i);
325 if (PTR_ERR(dev->clk[i]) == -EPROBE_DEFER) {
326 ret = -EPROBE_DEFER;
327 goto out_clk;
328 }
329 if (IS_ERR(dev->clk[i]))
330 break;
331 clk_prepare_enable(dev->clk[i]);
332 }
333
334 if (!IS_ERR(of_clk_get(pdev->dev.of_node,
335 ARRAY_SIZE(dev->clk))))
336 dev_warn(&pdev->dev,
337 "unsupported number of clocks, limiting to the first "
338 __stringify(ARRAY_SIZE(dev->clk)) "\n");
339 } else {
340 dev->clk[0] = clk_get(&pdev->dev, NULL);
341 if (PTR_ERR(dev->clk[0]) == -EPROBE_DEFER) {
325 ret = -EPROBE_DEFER; 342 ret = -EPROBE_DEFER;
326 goto out_clk; 343 goto out_clk;
327 } 344 }
328 if (IS_ERR(dev->clk[i])) 345 if (!IS_ERR(dev->clk[0]))
329 break; 346 clk_prepare_enable(dev->clk[0]);
330 clk_prepare_enable(dev->clk[i]);
331 } 347 }
332 348
333 if (!IS_ERR(of_clk_get(pdev->dev.of_node, ARRAY_SIZE(dev->clk))))
334 dev_warn(&pdev->dev, "unsupported number of clocks, limiting to the first "
335 __stringify(ARRAY_SIZE(dev->clk)) "\n");
336 349
337 dev->err_interrupt = platform_get_irq(pdev, 0); 350 dev->err_interrupt = platform_get_irq(pdev, 0);
338 if (dev->err_interrupt > 0 && 351 if (dev->err_interrupt > 0 &&
diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
index c51f1d5b550b..ccdd47f3b8fb 100644
--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
+++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
@@ -811,6 +811,26 @@ static int mvpp2_swf_bm_pool_init(struct mvpp2_port *port)
811 return 0; 811 return 0;
812} 812}
813 813
814static void mvpp2_set_hw_csum(struct mvpp2_port *port,
815 enum mvpp2_bm_pool_log_num new_long_pool)
816{
817 const netdev_features_t csums = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
818
819 /* Update L4 checksum when jumbo enable/disable on port.
820 * Only port 0 supports hardware checksum offload due to
821 * the Tx FIFO size limitation.
822 * Also, don't set NETIF_F_HW_CSUM because L3_offset in TX descriptor
823 * has 7 bits, so the maximum L3 offset is 128.
824 */
825 if (new_long_pool == MVPP2_BM_JUMBO && port->id != 0) {
826 port->dev->features &= ~csums;
827 port->dev->hw_features &= ~csums;
828 } else {
829 port->dev->features |= csums;
830 port->dev->hw_features |= csums;
831 }
832}
833
814static int mvpp2_bm_update_mtu(struct net_device *dev, int mtu) 834static int mvpp2_bm_update_mtu(struct net_device *dev, int mtu)
815{ 835{
816 struct mvpp2_port *port = netdev_priv(dev); 836 struct mvpp2_port *port = netdev_priv(dev);
@@ -843,15 +863,7 @@ static int mvpp2_bm_update_mtu(struct net_device *dev, int mtu)
843 /* Add port to new short & long pool */ 863 /* Add port to new short & long pool */
844 mvpp2_swf_bm_pool_init(port); 864 mvpp2_swf_bm_pool_init(port);
845 865
846 /* Update L4 checksum when jumbo enable/disable on port */ 866 mvpp2_set_hw_csum(port, new_long_pool);
847 if (new_long_pool == MVPP2_BM_JUMBO && port->id != 0) {
848 dev->features &= ~(NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM);
849 dev->hw_features &= ~(NETIF_F_IP_CSUM |
850 NETIF_F_IPV6_CSUM);
851 } else {
852 dev->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
853 dev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
854 }
855 } 867 }
856 868
857 dev->mtu = mtu; 869 dev->mtu = mtu;
@@ -3700,6 +3712,7 @@ static int mvpp2_set_mac_address(struct net_device *dev, void *p)
3700static int mvpp2_change_mtu(struct net_device *dev, int mtu) 3712static int mvpp2_change_mtu(struct net_device *dev, int mtu)
3701{ 3713{
3702 struct mvpp2_port *port = netdev_priv(dev); 3714 struct mvpp2_port *port = netdev_priv(dev);
3715 bool running = netif_running(dev);
3703 int err; 3716 int err;
3704 3717
3705 if (!IS_ALIGNED(MVPP2_RX_PKT_SIZE(mtu), 8)) { 3718 if (!IS_ALIGNED(MVPP2_RX_PKT_SIZE(mtu), 8)) {
@@ -3708,40 +3721,24 @@ static int mvpp2_change_mtu(struct net_device *dev, int mtu)
3708 mtu = ALIGN(MVPP2_RX_PKT_SIZE(mtu), 8); 3721 mtu = ALIGN(MVPP2_RX_PKT_SIZE(mtu), 8);
3709 } 3722 }
3710 3723
3711 if (!netif_running(dev)) { 3724 if (running)
3712 err = mvpp2_bm_update_mtu(dev, mtu); 3725 mvpp2_stop_dev(port);
3713 if (!err) {
3714 port->pkt_size = MVPP2_RX_PKT_SIZE(mtu);
3715 return 0;
3716 }
3717
3718 /* Reconfigure BM to the original MTU */
3719 err = mvpp2_bm_update_mtu(dev, dev->mtu);
3720 if (err)
3721 goto log_error;
3722 }
3723
3724 mvpp2_stop_dev(port);
3725 3726
3726 err = mvpp2_bm_update_mtu(dev, mtu); 3727 err = mvpp2_bm_update_mtu(dev, mtu);
3727 if (!err) { 3728 if (err) {
3729 netdev_err(dev, "failed to change MTU\n");
3730 /* Reconfigure BM to the original MTU */
3731 mvpp2_bm_update_mtu(dev, dev->mtu);
3732 } else {
3728 port->pkt_size = MVPP2_RX_PKT_SIZE(mtu); 3733 port->pkt_size = MVPP2_RX_PKT_SIZE(mtu);
3729 goto out_start;
3730 } 3734 }
3731 3735
3732 /* Reconfigure BM to the original MTU */ 3736 if (running) {
3733 err = mvpp2_bm_update_mtu(dev, dev->mtu); 3737 mvpp2_start_dev(port);
3734 if (err) 3738 mvpp2_egress_enable(port);
3735 goto log_error; 3739 mvpp2_ingress_enable(port);
3736 3740 }
3737out_start:
3738 mvpp2_start_dev(port);
3739 mvpp2_egress_enable(port);
3740 mvpp2_ingress_enable(port);
3741 3741
3742 return 0;
3743log_error:
3744 netdev_err(dev, "failed to change MTU\n");
3745 return err; 3742 return err;
3746} 3743}
3747 3744
@@ -4739,9 +4736,9 @@ static void mvpp2_xlg_config(struct mvpp2_port *port, unsigned int mode,
4739 else 4736 else
4740 ctrl0 &= ~MVPP22_XLG_CTRL0_RX_FLOW_CTRL_EN; 4737 ctrl0 &= ~MVPP22_XLG_CTRL0_RX_FLOW_CTRL_EN;
4741 4738
4742 ctrl4 &= ~MVPP22_XLG_CTRL4_MACMODSELECT_GMAC; 4739 ctrl4 &= ~(MVPP22_XLG_CTRL4_MACMODSELECT_GMAC |
4743 ctrl4 |= MVPP22_XLG_CTRL4_FWD_FC | MVPP22_XLG_CTRL4_FWD_PFC | 4740 MVPP22_XLG_CTRL4_EN_IDLE_CHECK);
4744 MVPP22_XLG_CTRL4_EN_IDLE_CHECK; 4741 ctrl4 |= MVPP22_XLG_CTRL4_FWD_FC | MVPP22_XLG_CTRL4_FWD_PFC;
4745 4742
4746 if (old_ctrl0 != ctrl0) 4743 if (old_ctrl0 != ctrl0)
4747 writel(ctrl0, port->base + MVPP22_XLG_CTRL0_REG); 4744 writel(ctrl0, port->base + MVPP22_XLG_CTRL0_REG);
@@ -5208,10 +5205,7 @@ static int mvpp2_port_probe(struct platform_device *pdev,
5208 dev->features |= NETIF_F_NTUPLE; 5205 dev->features |= NETIF_F_NTUPLE;
5209 } 5206 }
5210 5207
5211 if (port->pool_long->id == MVPP2_BM_JUMBO && port->id != 0) { 5208 mvpp2_set_hw_csum(port, port->pool_long->id);
5212 dev->features &= ~(NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM);
5213 dev->hw_features &= ~(NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM);
5214 }
5215 5209
5216 dev->vlan_features |= features; 5210 dev->vlan_features |= features;
5217 dev->gso_max_segs = MVPP2_MAX_TSO_SEGS; 5211 dev->gso_max_segs = MVPP2_MAX_TSO_SEGS;
@@ -5759,9 +5753,6 @@ static int mvpp2_remove(struct platform_device *pdev)
5759 5753
5760 mvpp2_dbgfs_cleanup(priv); 5754 mvpp2_dbgfs_cleanup(priv);
5761 5755
5762 flush_workqueue(priv->stats_queue);
5763 destroy_workqueue(priv->stats_queue);
5764
5765 fwnode_for_each_available_child_node(fwnode, port_fwnode) { 5756 fwnode_for_each_available_child_node(fwnode, port_fwnode) {
5766 if (priv->port_list[i]) { 5757 if (priv->port_list[i]) {
5767 mutex_destroy(&priv->port_list[i]->gather_stats_lock); 5758 mutex_destroy(&priv->port_list[i]->gather_stats_lock);
@@ -5770,6 +5761,8 @@ static int mvpp2_remove(struct platform_device *pdev)
5770 i++; 5761 i++;
5771 } 5762 }
5772 5763
5764 destroy_workqueue(priv->stats_queue);
5765
5773 for (i = 0; i < MVPP2_BM_POOLS_NUM; i++) { 5766 for (i = 0; i < MVPP2_BM_POOLS_NUM; i++) {
5774 struct mvpp2_bm_pool *bm_pool = &priv->bm_pools[i]; 5767 struct mvpp2_bm_pool *bm_pool = &priv->bm_pools[i];
5775 5768
diff --git a/drivers/net/ethernet/marvell/sky2.c b/drivers/net/ethernet/marvell/sky2.c
index f518312ffe69..e0363870f3a5 100644
--- a/drivers/net/ethernet/marvell/sky2.c
+++ b/drivers/net/ethernet/marvell/sky2.c
@@ -4924,6 +4924,20 @@ static const struct dmi_system_id msi_blacklist[] = {
4924 DMI_MATCH(DMI_PRODUCT_NAME, "P5W DH Deluxe"), 4924 DMI_MATCH(DMI_PRODUCT_NAME, "P5W DH Deluxe"),
4925 }, 4925 },
4926 }, 4926 },
4927 {
4928 .ident = "ASUS P6T",
4929 .matches = {
4930 DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."),
4931 DMI_MATCH(DMI_BOARD_NAME, "P6T"),
4932 },
4933 },
4934 {
4935 .ident = "ASUS P6X",
4936 .matches = {
4937 DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."),
4938 DMI_MATCH(DMI_BOARD_NAME, "P6X"),
4939 },
4940 },
4927 {} 4941 {}
4928}; 4942};
4929 4943
diff --git a/drivers/net/ethernet/mediatek/Kconfig b/drivers/net/ethernet/mediatek/Kconfig
index 263cd0909fe0..1f7fff81f24d 100644
--- a/drivers/net/ethernet/mediatek/Kconfig
+++ b/drivers/net/ethernet/mediatek/Kconfig
@@ -9,7 +9,6 @@ if NET_VENDOR_MEDIATEK
9 9
10config NET_MEDIATEK_SOC 10config NET_MEDIATEK_SOC
11 tristate "MediaTek SoC Gigabit Ethernet support" 11 tristate "MediaTek SoC Gigabit Ethernet support"
12 depends on NET_VENDOR_MEDIATEK
13 select PHYLIB 12 select PHYLIB
14 ---help--- 13 ---help---
15 This driver supports the gigabit ethernet MACs in the 14 This driver supports the gigabit ethernet MACs in the
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
index 6c01314e87b0..db3552f2d087 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
@@ -1187,7 +1187,7 @@ int mlx4_en_config_rss_steer(struct mlx4_en_priv *priv)
1187 err = mlx4_qp_alloc(mdev->dev, priv->base_qpn, rss_map->indir_qp); 1187 err = mlx4_qp_alloc(mdev->dev, priv->base_qpn, rss_map->indir_qp);
1188 if (err) { 1188 if (err) {
1189 en_err(priv, "Failed to allocate RSS indirection QP\n"); 1189 en_err(priv, "Failed to allocate RSS indirection QP\n");
1190 goto rss_err; 1190 goto qp_alloc_err;
1191 } 1191 }
1192 1192
1193 rss_map->indir_qp->event = mlx4_en_sqp_event; 1193 rss_map->indir_qp->event = mlx4_en_sqp_event;
@@ -1241,6 +1241,7 @@ indir_err:
1241 MLX4_QP_STATE_RST, NULL, 0, 0, rss_map->indir_qp); 1241 MLX4_QP_STATE_RST, NULL, 0, 0, rss_map->indir_qp);
1242 mlx4_qp_remove(mdev->dev, rss_map->indir_qp); 1242 mlx4_qp_remove(mdev->dev, rss_map->indir_qp);
1243 mlx4_qp_free(mdev->dev, rss_map->indir_qp); 1243 mlx4_qp_free(mdev->dev, rss_map->indir_qp);
1244qp_alloc_err:
1244 kfree(rss_map->indir_qp); 1245 kfree(rss_map->indir_qp);
1245 rss_map->indir_qp = NULL; 1246 rss_map->indir_qp = NULL;
1246rss_err: 1247rss_err:
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/dev.c b/drivers/net/ethernet/mellanox/mlx5/core/dev.c
index 5bb6a26ea267..50862275544e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/dev.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/dev.c
@@ -213,7 +213,7 @@ void mlx5_unregister_device(struct mlx5_core_dev *dev)
213 struct mlx5_interface *intf; 213 struct mlx5_interface *intf;
214 214
215 mutex_lock(&mlx5_intf_mutex); 215 mutex_lock(&mlx5_intf_mutex);
216 list_for_each_entry(intf, &intf_list, list) 216 list_for_each_entry_reverse(intf, &intf_list, list)
217 mlx5_remove_device(intf, priv); 217 mlx5_remove_device(intf, priv);
218 list_del(&priv->dev_list); 218 list_del(&priv->dev_list);
219 mutex_unlock(&mlx5_intf_mutex); 219 mutex_unlock(&mlx5_intf_mutex);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h
index 79d93d6c7d7a..65bec19a438f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
@@ -159,7 +159,7 @@ do { \
159enum mlx5e_rq_group { 159enum mlx5e_rq_group {
160 MLX5E_RQ_GROUP_REGULAR, 160 MLX5E_RQ_GROUP_REGULAR,
161 MLX5E_RQ_GROUP_XSK, 161 MLX5E_RQ_GROUP_XSK,
162 MLX5E_NUM_RQ_GROUPS /* Keep last. */ 162#define MLX5E_NUM_RQ_GROUPS(g) (1 + MLX5E_RQ_GROUP_##g)
163}; 163};
164 164
165static inline u16 mlx5_min_rx_wqes(int wq_type, u32 wq_size) 165static inline u16 mlx5_min_rx_wqes(int wq_type, u32 wq_size)
@@ -182,18 +182,15 @@ static inline int mlx5e_get_max_num_channels(struct mlx5_core_dev *mdev)
182 min_t(int, mlx5_comp_vectors_count(mdev), MLX5E_MAX_NUM_CHANNELS); 182 min_t(int, mlx5_comp_vectors_count(mdev), MLX5E_MAX_NUM_CHANNELS);
183} 183}
184 184
185/* Use this function to get max num channels after netdev was created */
186static inline int mlx5e_get_netdev_max_channels(struct net_device *netdev)
187{
188 return min_t(unsigned int,
189 netdev->num_rx_queues / MLX5E_NUM_RQ_GROUPS,
190 netdev->num_tx_queues);
191}
192
193struct mlx5e_tx_wqe { 185struct mlx5e_tx_wqe {
194 struct mlx5_wqe_ctrl_seg ctrl; 186 struct mlx5_wqe_ctrl_seg ctrl;
195 struct mlx5_wqe_eth_seg eth; 187 union {
196 struct mlx5_wqe_data_seg data[0]; 188 struct {
189 struct mlx5_wqe_eth_seg eth;
190 struct mlx5_wqe_data_seg data[0];
191 };
192 u8 tls_progress_params_ctx[0];
193 };
197}; 194};
198 195
199struct mlx5e_rx_wqe_ll { 196struct mlx5e_rx_wqe_ll {
@@ -830,6 +827,7 @@ struct mlx5e_priv {
830 struct net_device *netdev; 827 struct net_device *netdev;
831 struct mlx5e_stats stats; 828 struct mlx5e_stats stats;
832 struct mlx5e_channel_stats channel_stats[MLX5E_MAX_NUM_CHANNELS]; 829 struct mlx5e_channel_stats channel_stats[MLX5E_MAX_NUM_CHANNELS];
830 u16 max_nch;
833 u8 max_opened_tc; 831 u8 max_opened_tc;
834 struct hwtstamp_config tstamp; 832 struct hwtstamp_config tstamp;
835 u16 q_counter; 833 u16 q_counter;
@@ -871,6 +869,7 @@ struct mlx5e_profile {
871 mlx5e_fp_handle_rx_cqe handle_rx_cqe_mpwqe; 869 mlx5e_fp_handle_rx_cqe handle_rx_cqe_mpwqe;
872 } rx_handlers; 870 } rx_handlers;
873 int max_tc; 871 int max_tc;
872 u8 rq_groups;
874}; 873};
875 874
876void mlx5e_build_ptys2ethtool_map(void); 875void mlx5e_build_ptys2ethtool_map(void);
@@ -1106,6 +1105,8 @@ u32 mlx5e_ethtool_get_rxfh_key_size(struct mlx5e_priv *priv);
1106u32 mlx5e_ethtool_get_rxfh_indir_size(struct mlx5e_priv *priv); 1105u32 mlx5e_ethtool_get_rxfh_indir_size(struct mlx5e_priv *priv);
1107int mlx5e_ethtool_get_ts_info(struct mlx5e_priv *priv, 1106int mlx5e_ethtool_get_ts_info(struct mlx5e_priv *priv,
1108 struct ethtool_ts_info *info); 1107 struct ethtool_ts_info *info);
1108int mlx5e_ethtool_flash_device(struct mlx5e_priv *priv,
1109 struct ethtool_flash *flash);
1109void mlx5e_ethtool_get_pauseparam(struct mlx5e_priv *priv, 1110void mlx5e_ethtool_get_pauseparam(struct mlx5e_priv *priv,
1110 struct ethtool_pauseparam *pauseparam); 1111 struct ethtool_pauseparam *pauseparam);
1111int mlx5e_ethtool_set_pauseparam(struct mlx5e_priv *priv, 1112int mlx5e_ethtool_set_pauseparam(struct mlx5e_priv *priv,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/params.h b/drivers/net/ethernet/mellanox/mlx5/core/en/params.h
index bd882b5ee9a7..3a615d663d84 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/params.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/params.h
@@ -66,9 +66,10 @@ static inline void mlx5e_qid_get_ch_and_group(struct mlx5e_params *params,
66 *group = qid / nch; 66 *group = qid / nch;
67} 67}
68 68
69static inline bool mlx5e_qid_validate(struct mlx5e_params *params, u64 qid) 69static inline bool mlx5e_qid_validate(const struct mlx5e_profile *profile,
70 struct mlx5e_params *params, u64 qid)
70{ 71{
71 return qid < params->num_channels * MLX5E_NUM_RQ_GROUPS; 72 return qid < params->num_channels * profile->rq_groups;
72} 73}
73 74
74/* Parameter calculations */ 75/* Parameter calculations */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/port.c b/drivers/net/ethernet/mellanox/mlx5/core/en/port.c
index d5e5afbdca6d..f777994f3005 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/port.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/port.c
@@ -78,9 +78,10 @@ static const u32 mlx5e_ext_link_speed[MLX5E_EXT_LINK_MODES_NUMBER] = {
78}; 78};
79 79
80static void mlx5e_port_get_speed_arr(struct mlx5_core_dev *mdev, 80static void mlx5e_port_get_speed_arr(struct mlx5_core_dev *mdev,
81 const u32 **arr, u32 *size) 81 const u32 **arr, u32 *size,
82 bool force_legacy)
82{ 83{
83 bool ext = MLX5_CAP_PCAM_FEATURE(mdev, ptys_extended_ethernet); 84 bool ext = force_legacy ? false : MLX5_CAP_PCAM_FEATURE(mdev, ptys_extended_ethernet);
84 85
85 *size = ext ? ARRAY_SIZE(mlx5e_ext_link_speed) : 86 *size = ext ? ARRAY_SIZE(mlx5e_ext_link_speed) :
86 ARRAY_SIZE(mlx5e_link_speed); 87 ARRAY_SIZE(mlx5e_link_speed);
@@ -152,7 +153,8 @@ int mlx5_port_set_eth_ptys(struct mlx5_core_dev *dev, bool an_disable,
152 sizeof(out), MLX5_REG_PTYS, 0, 1); 153 sizeof(out), MLX5_REG_PTYS, 0, 1);
153} 154}
154 155
155u32 mlx5e_port_ptys2speed(struct mlx5_core_dev *mdev, u32 eth_proto_oper) 156u32 mlx5e_port_ptys2speed(struct mlx5_core_dev *mdev, u32 eth_proto_oper,
157 bool force_legacy)
156{ 158{
157 unsigned long temp = eth_proto_oper; 159 unsigned long temp = eth_proto_oper;
158 const u32 *table; 160 const u32 *table;
@@ -160,7 +162,7 @@ u32 mlx5e_port_ptys2speed(struct mlx5_core_dev *mdev, u32 eth_proto_oper)
160 u32 max_size; 162 u32 max_size;
161 int i; 163 int i;
162 164
163 mlx5e_port_get_speed_arr(mdev, &table, &max_size); 165 mlx5e_port_get_speed_arr(mdev, &table, &max_size, force_legacy);
164 i = find_first_bit(&temp, max_size); 166 i = find_first_bit(&temp, max_size);
165 if (i < max_size) 167 if (i < max_size)
166 speed = table[i]; 168 speed = table[i];
@@ -170,6 +172,7 @@ u32 mlx5e_port_ptys2speed(struct mlx5_core_dev *mdev, u32 eth_proto_oper)
170int mlx5e_port_linkspeed(struct mlx5_core_dev *mdev, u32 *speed) 172int mlx5e_port_linkspeed(struct mlx5_core_dev *mdev, u32 *speed)
171{ 173{
172 struct mlx5e_port_eth_proto eproto; 174 struct mlx5e_port_eth_proto eproto;
175 bool force_legacy = false;
173 bool ext; 176 bool ext;
174 int err; 177 int err;
175 178
@@ -177,8 +180,13 @@ int mlx5e_port_linkspeed(struct mlx5_core_dev *mdev, u32 *speed)
177 err = mlx5_port_query_eth_proto(mdev, 1, ext, &eproto); 180 err = mlx5_port_query_eth_proto(mdev, 1, ext, &eproto);
178 if (err) 181 if (err)
179 goto out; 182 goto out;
180 183 if (ext && !eproto.admin) {
181 *speed = mlx5e_port_ptys2speed(mdev, eproto.oper); 184 force_legacy = true;
185 err = mlx5_port_query_eth_proto(mdev, 1, false, &eproto);
186 if (err)
187 goto out;
188 }
189 *speed = mlx5e_port_ptys2speed(mdev, eproto.oper, force_legacy);
182 if (!(*speed)) 190 if (!(*speed))
183 err = -EINVAL; 191 err = -EINVAL;
184 192
@@ -201,7 +209,7 @@ int mlx5e_port_max_linkspeed(struct mlx5_core_dev *mdev, u32 *speed)
201 if (err) 209 if (err)
202 return err; 210 return err;
203 211
204 mlx5e_port_get_speed_arr(mdev, &table, &max_size); 212 mlx5e_port_get_speed_arr(mdev, &table, &max_size, false);
205 for (i = 0; i < max_size; ++i) 213 for (i = 0; i < max_size; ++i)
206 if (eproto.cap & MLX5E_PROT_MASK(i)) 214 if (eproto.cap & MLX5E_PROT_MASK(i))
207 max_speed = max(max_speed, table[i]); 215 max_speed = max(max_speed, table[i]);
@@ -210,14 +218,15 @@ int mlx5e_port_max_linkspeed(struct mlx5_core_dev *mdev, u32 *speed)
210 return 0; 218 return 0;
211} 219}
212 220
213u32 mlx5e_port_speed2linkmodes(struct mlx5_core_dev *mdev, u32 speed) 221u32 mlx5e_port_speed2linkmodes(struct mlx5_core_dev *mdev, u32 speed,
222 bool force_legacy)
214{ 223{
215 u32 link_modes = 0; 224 u32 link_modes = 0;
216 const u32 *table; 225 const u32 *table;
217 u32 max_size; 226 u32 max_size;
218 int i; 227 int i;
219 228
220 mlx5e_port_get_speed_arr(mdev, &table, &max_size); 229 mlx5e_port_get_speed_arr(mdev, &table, &max_size, force_legacy);
221 for (i = 0; i < max_size; ++i) { 230 for (i = 0; i < max_size; ++i) {
222 if (table[i] == speed) 231 if (table[i] == speed)
223 link_modes |= MLX5E_PROT_MASK(i); 232 link_modes |= MLX5E_PROT_MASK(i);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/port.h b/drivers/net/ethernet/mellanox/mlx5/core/en/port.h
index 70f536ec51c4..4a7f4497692b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/port.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/port.h
@@ -48,10 +48,12 @@ void mlx5_port_query_eth_autoneg(struct mlx5_core_dev *dev, u8 *an_status,
48 u8 *an_disable_cap, u8 *an_disable_admin); 48 u8 *an_disable_cap, u8 *an_disable_admin);
49int mlx5_port_set_eth_ptys(struct mlx5_core_dev *dev, bool an_disable, 49int mlx5_port_set_eth_ptys(struct mlx5_core_dev *dev, bool an_disable,
50 u32 proto_admin, bool ext); 50 u32 proto_admin, bool ext);
51u32 mlx5e_port_ptys2speed(struct mlx5_core_dev *mdev, u32 eth_proto_oper); 51u32 mlx5e_port_ptys2speed(struct mlx5_core_dev *mdev, u32 eth_proto_oper,
52 bool force_legacy);
52int mlx5e_port_linkspeed(struct mlx5_core_dev *mdev, u32 *speed); 53int mlx5e_port_linkspeed(struct mlx5_core_dev *mdev, u32 *speed);
53int mlx5e_port_max_linkspeed(struct mlx5_core_dev *mdev, u32 *speed); 54int mlx5e_port_max_linkspeed(struct mlx5_core_dev *mdev, u32 *speed);
54u32 mlx5e_port_speed2linkmodes(struct mlx5_core_dev *mdev, u32 speed); 55u32 mlx5e_port_speed2linkmodes(struct mlx5_core_dev *mdev, u32 speed,
56 bool force_legacy);
55 57
56int mlx5e_port_query_pbmc(struct mlx5_core_dev *mdev, void *out); 58int mlx5e_port_query_pbmc(struct mlx5_core_dev *mdev, void *out);
57int mlx5e_port_set_pbmc(struct mlx5_core_dev *mdev, void *in); 59int mlx5e_port_set_pbmc(struct mlx5_core_dev *mdev, void *in);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c
index f3d98748b211..c7f86453c638 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c
@@ -76,26 +76,21 @@ static int mlx5e_tx_reporter_err_cqe_recover(struct mlx5e_txqsq *sq)
76 u8 state; 76 u8 state;
77 int err; 77 int err;
78 78
79 if (!test_bit(MLX5E_SQ_STATE_RECOVERING, &sq->state))
80 return 0;
81
82 err = mlx5_core_query_sq_state(mdev, sq->sqn, &state); 79 err = mlx5_core_query_sq_state(mdev, sq->sqn, &state);
83 if (err) { 80 if (err) {
84 netdev_err(dev, "Failed to query SQ 0x%x state. err = %d\n", 81 netdev_err(dev, "Failed to query SQ 0x%x state. err = %d\n",
85 sq->sqn, err); 82 sq->sqn, err);
86 return err; 83 goto out;
87 } 84 }
88 85
89 if (state != MLX5_SQC_STATE_ERR) { 86 if (state != MLX5_SQC_STATE_ERR)
90 netdev_err(dev, "SQ 0x%x not in ERROR state\n", sq->sqn); 87 goto out;
91 return -EINVAL;
92 }
93 88
94 mlx5e_tx_disable_queue(sq->txq); 89 mlx5e_tx_disable_queue(sq->txq);
95 90
96 err = mlx5e_wait_for_sq_flush(sq); 91 err = mlx5e_wait_for_sq_flush(sq);
97 if (err) 92 if (err)
98 return err; 93 goto out;
99 94
100 /* At this point, no new packets will arrive from the stack as TXQ is 95 /* At this point, no new packets will arrive from the stack as TXQ is
101 * marked with QUEUE_STATE_DRV_XOFF. In addition, NAPI cleared all 96 * marked with QUEUE_STATE_DRV_XOFF. In addition, NAPI cleared all
@@ -104,13 +99,17 @@ static int mlx5e_tx_reporter_err_cqe_recover(struct mlx5e_txqsq *sq)
104 99
105 err = mlx5e_sq_to_ready(sq, state); 100 err = mlx5e_sq_to_ready(sq, state);
106 if (err) 101 if (err)
107 return err; 102 goto out;
108 103
109 mlx5e_reset_txqsq_cc_pc(sq); 104 mlx5e_reset_txqsq_cc_pc(sq);
110 sq->stats->recover++; 105 sq->stats->recover++;
106 clear_bit(MLX5E_SQ_STATE_RECOVERING, &sq->state);
111 mlx5e_activate_txqsq(sq); 107 mlx5e_activate_txqsq(sq);
112 108
113 return 0; 109 return 0;
110out:
111 clear_bit(MLX5E_SQ_STATE_RECOVERING, &sq->state);
112 return err;
114} 113}
115 114
116static int mlx5_tx_health_report(struct devlink_health_reporter *tx_reporter, 115static int mlx5_tx_health_report(struct devlink_health_reporter *tx_reporter,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c
index aaffa6f68dc0..7f78c004d12f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c
@@ -143,7 +143,10 @@ void mlx5e_activate_xsk(struct mlx5e_channel *c)
143{ 143{
144 set_bit(MLX5E_RQ_STATE_ENABLED, &c->xskrq.state); 144 set_bit(MLX5E_RQ_STATE_ENABLED, &c->xskrq.state);
145 /* TX queue is created active. */ 145 /* TX queue is created active. */
146
147 spin_lock(&c->xskicosq_lock);
146 mlx5e_trigger_irq(&c->xskicosq); 148 mlx5e_trigger_irq(&c->xskicosq);
149 spin_unlock(&c->xskicosq_lock);
147} 150}
148 151
149void mlx5e_deactivate_xsk(struct mlx5e_channel *c) 152void mlx5e_deactivate_xsk(struct mlx5e_channel *c)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.h
index 407da83474ef..b7298f9ee3d3 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.h
@@ -11,12 +11,14 @@
11#include "accel/tls.h" 11#include "accel/tls.h"
12 12
13#define MLX5E_KTLS_STATIC_UMR_WQE_SZ \ 13#define MLX5E_KTLS_STATIC_UMR_WQE_SZ \
14 (sizeof(struct mlx5e_umr_wqe) + MLX5_ST_SZ_BYTES(tls_static_params)) 14 (offsetof(struct mlx5e_umr_wqe, tls_static_params_ctx) + \
15 MLX5_ST_SZ_BYTES(tls_static_params))
15#define MLX5E_KTLS_STATIC_WQEBBS \ 16#define MLX5E_KTLS_STATIC_WQEBBS \
16 (DIV_ROUND_UP(MLX5E_KTLS_STATIC_UMR_WQE_SZ, MLX5_SEND_WQE_BB)) 17 (DIV_ROUND_UP(MLX5E_KTLS_STATIC_UMR_WQE_SZ, MLX5_SEND_WQE_BB))
17 18
18#define MLX5E_KTLS_PROGRESS_WQE_SZ \ 19#define MLX5E_KTLS_PROGRESS_WQE_SZ \
19 (sizeof(struct mlx5e_tx_wqe) + MLX5_ST_SZ_BYTES(tls_progress_params)) 20 (offsetof(struct mlx5e_tx_wqe, tls_progress_params_ctx) + \
21 MLX5_ST_SZ_BYTES(tls_progress_params))
20#define MLX5E_KTLS_PROGRESS_WQEBBS \ 22#define MLX5E_KTLS_PROGRESS_WQEBBS \
21 (DIV_ROUND_UP(MLX5E_KTLS_PROGRESS_WQE_SZ, MLX5_SEND_WQE_BB)) 23 (DIV_ROUND_UP(MLX5E_KTLS_PROGRESS_WQE_SZ, MLX5_SEND_WQE_BB))
22#define MLX5E_KTLS_MAX_DUMP_WQEBBS 2 24#define MLX5E_KTLS_MAX_DUMP_WQEBBS 2
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c
index ea032f54197e..7833ddef0427 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c
@@ -69,7 +69,7 @@ build_static_params(struct mlx5e_umr_wqe *wqe, u16 pc, u32 sqn,
69 cseg->qpn_ds = cpu_to_be32((sqn << MLX5_WQE_CTRL_QPN_SHIFT) | 69 cseg->qpn_ds = cpu_to_be32((sqn << MLX5_WQE_CTRL_QPN_SHIFT) |
70 STATIC_PARAMS_DS_CNT); 70 STATIC_PARAMS_DS_CNT);
71 cseg->fm_ce_se = fence ? MLX5_FENCE_MODE_INITIATOR_SMALL : 0; 71 cseg->fm_ce_se = fence ? MLX5_FENCE_MODE_INITIATOR_SMALL : 0;
72 cseg->imm = cpu_to_be32(priv_tx->tisn); 72 cseg->tisn = cpu_to_be32(priv_tx->tisn << 8);
73 73
74 ucseg->flags = MLX5_UMR_INLINE; 74 ucseg->flags = MLX5_UMR_INLINE;
75 ucseg->bsf_octowords = cpu_to_be16(MLX5_ST_SZ_BYTES(tls_static_params) / 16); 75 ucseg->bsf_octowords = cpu_to_be16(MLX5_ST_SZ_BYTES(tls_static_params) / 16);
@@ -80,7 +80,7 @@ build_static_params(struct mlx5e_umr_wqe *wqe, u16 pc, u32 sqn,
80static void 80static void
81fill_progress_params_ctx(void *ctx, struct mlx5e_ktls_offload_context_tx *priv_tx) 81fill_progress_params_ctx(void *ctx, struct mlx5e_ktls_offload_context_tx *priv_tx)
82{ 82{
83 MLX5_SET(tls_progress_params, ctx, pd, priv_tx->tisn); 83 MLX5_SET(tls_progress_params, ctx, tisn, priv_tx->tisn);
84 MLX5_SET(tls_progress_params, ctx, record_tracker_state, 84 MLX5_SET(tls_progress_params, ctx, record_tracker_state,
85 MLX5E_TLS_PROGRESS_PARAMS_RECORD_TRACKER_STATE_START); 85 MLX5E_TLS_PROGRESS_PARAMS_RECORD_TRACKER_STATE_START);
86 MLX5_SET(tls_progress_params, ctx, auth_state, 86 MLX5_SET(tls_progress_params, ctx, auth_state,
@@ -104,18 +104,20 @@ build_progress_params(struct mlx5e_tx_wqe *wqe, u16 pc, u32 sqn,
104 PROGRESS_PARAMS_DS_CNT); 104 PROGRESS_PARAMS_DS_CNT);
105 cseg->fm_ce_se = fence ? MLX5_FENCE_MODE_INITIATOR_SMALL : 0; 105 cseg->fm_ce_se = fence ? MLX5_FENCE_MODE_INITIATOR_SMALL : 0;
106 106
107 fill_progress_params_ctx(wqe->data, priv_tx); 107 fill_progress_params_ctx(wqe->tls_progress_params_ctx, priv_tx);
108} 108}
109 109
110static void tx_fill_wi(struct mlx5e_txqsq *sq, 110static void tx_fill_wi(struct mlx5e_txqsq *sq,
111 u16 pi, u8 num_wqebbs, 111 u16 pi, u8 num_wqebbs,
112 skb_frag_t *resync_dump_frag) 112 skb_frag_t *resync_dump_frag,
113 u32 num_bytes)
113{ 114{
114 struct mlx5e_tx_wqe_info *wi = &sq->db.wqe_info[pi]; 115 struct mlx5e_tx_wqe_info *wi = &sq->db.wqe_info[pi];
115 116
116 wi->skb = NULL; 117 wi->skb = NULL;
117 wi->num_wqebbs = num_wqebbs; 118 wi->num_wqebbs = num_wqebbs;
118 wi->resync_dump_frag = resync_dump_frag; 119 wi->resync_dump_frag = resync_dump_frag;
120 wi->num_bytes = num_bytes;
119} 121}
120 122
121void mlx5e_ktls_tx_offload_set_pending(struct mlx5e_ktls_offload_context_tx *priv_tx) 123void mlx5e_ktls_tx_offload_set_pending(struct mlx5e_ktls_offload_context_tx *priv_tx)
@@ -143,7 +145,7 @@ post_static_params(struct mlx5e_txqsq *sq,
143 145
144 umr_wqe = mlx5e_sq_fetch_wqe(sq, MLX5E_KTLS_STATIC_UMR_WQE_SZ, &pi); 146 umr_wqe = mlx5e_sq_fetch_wqe(sq, MLX5E_KTLS_STATIC_UMR_WQE_SZ, &pi);
145 build_static_params(umr_wqe, sq->pc, sq->sqn, priv_tx, fence); 147 build_static_params(umr_wqe, sq->pc, sq->sqn, priv_tx, fence);
146 tx_fill_wi(sq, pi, MLX5E_KTLS_STATIC_WQEBBS, NULL); 148 tx_fill_wi(sq, pi, MLX5E_KTLS_STATIC_WQEBBS, NULL, 0);
147 sq->pc += MLX5E_KTLS_STATIC_WQEBBS; 149 sq->pc += MLX5E_KTLS_STATIC_WQEBBS;
148} 150}
149 151
@@ -157,7 +159,7 @@ post_progress_params(struct mlx5e_txqsq *sq,
157 159
158 wqe = mlx5e_sq_fetch_wqe(sq, MLX5E_KTLS_PROGRESS_WQE_SZ, &pi); 160 wqe = mlx5e_sq_fetch_wqe(sq, MLX5E_KTLS_PROGRESS_WQE_SZ, &pi);
159 build_progress_params(wqe, sq->pc, sq->sqn, priv_tx, fence); 161 build_progress_params(wqe, sq->pc, sq->sqn, priv_tx, fence);
160 tx_fill_wi(sq, pi, MLX5E_KTLS_PROGRESS_WQEBBS, NULL); 162 tx_fill_wi(sq, pi, MLX5E_KTLS_PROGRESS_WQEBBS, NULL, 0);
161 sq->pc += MLX5E_KTLS_PROGRESS_WQEBBS; 163 sq->pc += MLX5E_KTLS_PROGRESS_WQEBBS;
162} 164}
163 165
@@ -248,43 +250,37 @@ tx_post_resync_params(struct mlx5e_txqsq *sq,
248 mlx5e_ktls_tx_post_param_wqes(sq, priv_tx, skip_static_post, true); 250 mlx5e_ktls_tx_post_param_wqes(sq, priv_tx, skip_static_post, true);
249} 251}
250 252
253struct mlx5e_dump_wqe {
254 struct mlx5_wqe_ctrl_seg ctrl;
255 struct mlx5_wqe_data_seg data;
256};
257
251static int 258static int
252tx_post_resync_dump(struct mlx5e_txqsq *sq, struct sk_buff *skb, 259tx_post_resync_dump(struct mlx5e_txqsq *sq, struct sk_buff *skb,
253 skb_frag_t *frag, u32 tisn, bool first) 260 skb_frag_t *frag, u32 tisn, bool first)
254{ 261{
255 struct mlx5_wqe_ctrl_seg *cseg; 262 struct mlx5_wqe_ctrl_seg *cseg;
256 struct mlx5_wqe_eth_seg *eseg;
257 struct mlx5_wqe_data_seg *dseg; 263 struct mlx5_wqe_data_seg *dseg;
258 struct mlx5e_tx_wqe *wqe; 264 struct mlx5e_dump_wqe *wqe;
259 dma_addr_t dma_addr = 0; 265 dma_addr_t dma_addr = 0;
260 u16 ds_cnt, ds_cnt_inl;
261 u8 num_wqebbs; 266 u8 num_wqebbs;
262 u16 pi, ihs; 267 u16 ds_cnt;
263 int fsz; 268 int fsz;
264 269 u16 pi;
265 ds_cnt = sizeof(*wqe) / MLX5_SEND_WQE_DS;
266 ihs = eth_get_headlen(skb->dev, skb->data, skb_headlen(skb));
267 ds_cnt_inl = DIV_ROUND_UP(ihs - INL_HDR_START_SZ, MLX5_SEND_WQE_DS);
268 ds_cnt += ds_cnt_inl;
269 ds_cnt += 1; /* one frag */
270 270
271 wqe = mlx5e_sq_fetch_wqe(sq, sizeof(*wqe), &pi); 271 wqe = mlx5e_sq_fetch_wqe(sq, sizeof(*wqe), &pi);
272 272
273 ds_cnt = sizeof(*wqe) / MLX5_SEND_WQE_DS;
273 num_wqebbs = DIV_ROUND_UP(ds_cnt, MLX5_SEND_WQEBB_NUM_DS); 274 num_wqebbs = DIV_ROUND_UP(ds_cnt, MLX5_SEND_WQEBB_NUM_DS);
274 275
275 cseg = &wqe->ctrl; 276 cseg = &wqe->ctrl;
276 eseg = &wqe->eth; 277 dseg = &wqe->data;
277 dseg = wqe->data;
278 278
279 cseg->opmod_idx_opcode = cpu_to_be32((sq->pc << 8) | MLX5_OPCODE_DUMP); 279 cseg->opmod_idx_opcode = cpu_to_be32((sq->pc << 8) | MLX5_OPCODE_DUMP);
280 cseg->qpn_ds = cpu_to_be32((sq->sqn << 8) | ds_cnt); 280 cseg->qpn_ds = cpu_to_be32((sq->sqn << 8) | ds_cnt);
281 cseg->imm = cpu_to_be32(tisn); 281 cseg->tisn = cpu_to_be32(tisn << 8);
282 cseg->fm_ce_se = first ? MLX5_FENCE_MODE_INITIATOR_SMALL : 0; 282 cseg->fm_ce_se = first ? MLX5_FENCE_MODE_INITIATOR_SMALL : 0;
283 283
284 eseg->inline_hdr.sz = cpu_to_be16(ihs);
285 memcpy(eseg->inline_hdr.start, skb->data, ihs);
286 dseg += ds_cnt_inl;
287
288 fsz = skb_frag_size(frag); 284 fsz = skb_frag_size(frag);
289 dma_addr = skb_frag_dma_map(sq->pdev, frag, 0, fsz, 285 dma_addr = skb_frag_dma_map(sq->pdev, frag, 0, fsz,
290 DMA_TO_DEVICE); 286 DMA_TO_DEVICE);
@@ -296,7 +292,7 @@ tx_post_resync_dump(struct mlx5e_txqsq *sq, struct sk_buff *skb,
296 dseg->byte_count = cpu_to_be32(fsz); 292 dseg->byte_count = cpu_to_be32(fsz);
297 mlx5e_dma_push(sq, dma_addr, fsz, MLX5E_DMA_MAP_PAGE); 293 mlx5e_dma_push(sq, dma_addr, fsz, MLX5E_DMA_MAP_PAGE);
298 294
299 tx_fill_wi(sq, pi, num_wqebbs, frag); 295 tx_fill_wi(sq, pi, num_wqebbs, frag, fsz);
300 sq->pc += num_wqebbs; 296 sq->pc += num_wqebbs;
301 297
302 WARN(num_wqebbs > MLX5E_KTLS_MAX_DUMP_WQEBBS, 298 WARN(num_wqebbs > MLX5E_KTLS_MAX_DUMP_WQEBBS,
@@ -323,7 +319,7 @@ static void tx_post_fence_nop(struct mlx5e_txqsq *sq)
323 struct mlx5_wq_cyc *wq = &sq->wq; 319 struct mlx5_wq_cyc *wq = &sq->wq;
324 u16 pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc); 320 u16 pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
325 321
326 tx_fill_wi(sq, pi, 1, NULL); 322 tx_fill_wi(sq, pi, 1, NULL, 0);
327 323
328 mlx5e_post_nop_fence(wq, sq->sqn, &sq->pc); 324 mlx5e_post_nop_fence(wq, sq->sqn, &sq->pc);
329} 325}
@@ -412,7 +408,7 @@ struct sk_buff *mlx5e_ktls_handle_tx_skb(struct net_device *netdev,
412 goto out; 408 goto out;
413 409
414 tls_ctx = tls_get_ctx(skb->sk); 410 tls_ctx = tls_get_ctx(skb->sk);
415 if (unlikely(tls_ctx->netdev != netdev)) 411 if (unlikely(WARN_ON_ONCE(tls_ctx->netdev != netdev)))
416 goto err_out; 412 goto err_out;
417 413
418 priv_tx = mlx5e_get_ktls_tx_priv_ctx(tls_ctx); 414 priv_tx = mlx5e_get_ktls_tx_priv_ctx(tls_ctx);
@@ -434,7 +430,7 @@ struct sk_buff *mlx5e_ktls_handle_tx_skb(struct net_device *netdev,
434 priv_tx->expected_seq = seq + datalen; 430 priv_tx->expected_seq = seq + datalen;
435 431
436 cseg = &(*wqe)->ctrl; 432 cseg = &(*wqe)->ctrl;
437 cseg->imm = cpu_to_be32(priv_tx->tisn); 433 cseg->tisn = cpu_to_be32(priv_tx->tisn << 8);
438 434
439 stats->tls_encrypted_packets += skb_is_gso(skb) ? skb_shinfo(skb)->gso_segs : 1; 435 stats->tls_encrypted_packets += skb_is_gso(skb) ? skb_shinfo(skb)->gso_segs : 1;
440 stats->tls_encrypted_bytes += datalen; 436 stats->tls_encrypted_bytes += datalen;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c
index 8657e0f26995..2c75b2752f58 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c
@@ -437,12 +437,6 @@ arfs_hash_bucket(struct arfs_table *arfs_t, __be16 src_port,
437 return &arfs_t->rules_hash[bucket_idx]; 437 return &arfs_t->rules_hash[bucket_idx];
438} 438}
439 439
440static u8 arfs_get_ip_proto(const struct sk_buff *skb)
441{
442 return (skb->protocol == htons(ETH_P_IP)) ?
443 ip_hdr(skb)->protocol : ipv6_hdr(skb)->nexthdr;
444}
445
446static struct arfs_table *arfs_get_table(struct mlx5e_arfs_tables *arfs, 440static struct arfs_table *arfs_get_table(struct mlx5e_arfs_tables *arfs,
447 u8 ip_proto, __be16 etype) 441 u8 ip_proto, __be16 etype)
448{ 442{
@@ -602,31 +596,9 @@ out:
602 arfs_may_expire_flow(priv); 596 arfs_may_expire_flow(priv);
603} 597}
604 598
605/* return L4 destination port from ip4/6 packets */
606static __be16 arfs_get_dst_port(const struct sk_buff *skb)
607{
608 char *transport_header;
609
610 transport_header = skb_transport_header(skb);
611 if (arfs_get_ip_proto(skb) == IPPROTO_TCP)
612 return ((struct tcphdr *)transport_header)->dest;
613 return ((struct udphdr *)transport_header)->dest;
614}
615
616/* return L4 source port from ip4/6 packets */
617static __be16 arfs_get_src_port(const struct sk_buff *skb)
618{
619 char *transport_header;
620
621 transport_header = skb_transport_header(skb);
622 if (arfs_get_ip_proto(skb) == IPPROTO_TCP)
623 return ((struct tcphdr *)transport_header)->source;
624 return ((struct udphdr *)transport_header)->source;
625}
626
627static struct arfs_rule *arfs_alloc_rule(struct mlx5e_priv *priv, 599static struct arfs_rule *arfs_alloc_rule(struct mlx5e_priv *priv,
628 struct arfs_table *arfs_t, 600 struct arfs_table *arfs_t,
629 const struct sk_buff *skb, 601 const struct flow_keys *fk,
630 u16 rxq, u32 flow_id) 602 u16 rxq, u32 flow_id)
631{ 603{
632 struct arfs_rule *rule; 604 struct arfs_rule *rule;
@@ -641,19 +613,19 @@ static struct arfs_rule *arfs_alloc_rule(struct mlx5e_priv *priv,
641 INIT_WORK(&rule->arfs_work, arfs_handle_work); 613 INIT_WORK(&rule->arfs_work, arfs_handle_work);
642 614
643 tuple = &rule->tuple; 615 tuple = &rule->tuple;
644 tuple->etype = skb->protocol; 616 tuple->etype = fk->basic.n_proto;
617 tuple->ip_proto = fk->basic.ip_proto;
645 if (tuple->etype == htons(ETH_P_IP)) { 618 if (tuple->etype == htons(ETH_P_IP)) {
646 tuple->src_ipv4 = ip_hdr(skb)->saddr; 619 tuple->src_ipv4 = fk->addrs.v4addrs.src;
647 tuple->dst_ipv4 = ip_hdr(skb)->daddr; 620 tuple->dst_ipv4 = fk->addrs.v4addrs.dst;
648 } else { 621 } else {
649 memcpy(&tuple->src_ipv6, &ipv6_hdr(skb)->saddr, 622 memcpy(&tuple->src_ipv6, &fk->addrs.v6addrs.src,
650 sizeof(struct in6_addr)); 623 sizeof(struct in6_addr));
651 memcpy(&tuple->dst_ipv6, &ipv6_hdr(skb)->daddr, 624 memcpy(&tuple->dst_ipv6, &fk->addrs.v6addrs.dst,
652 sizeof(struct in6_addr)); 625 sizeof(struct in6_addr));
653 } 626 }
654 tuple->ip_proto = arfs_get_ip_proto(skb); 627 tuple->src_port = fk->ports.src;
655 tuple->src_port = arfs_get_src_port(skb); 628 tuple->dst_port = fk->ports.dst;
656 tuple->dst_port = arfs_get_dst_port(skb);
657 629
658 rule->flow_id = flow_id; 630 rule->flow_id = flow_id;
659 rule->filter_id = priv->fs.arfs.last_filter_id++ % RPS_NO_FILTER; 631 rule->filter_id = priv->fs.arfs.last_filter_id++ % RPS_NO_FILTER;
@@ -664,37 +636,33 @@ static struct arfs_rule *arfs_alloc_rule(struct mlx5e_priv *priv,
664 return rule; 636 return rule;
665} 637}
666 638
667static bool arfs_cmp_ips(struct arfs_tuple *tuple, 639static bool arfs_cmp(const struct arfs_tuple *tuple, const struct flow_keys *fk)
668 const struct sk_buff *skb)
669{ 640{
670 if (tuple->etype == htons(ETH_P_IP) && 641 if (tuple->src_port != fk->ports.src || tuple->dst_port != fk->ports.dst)
671 tuple->src_ipv4 == ip_hdr(skb)->saddr && 642 return false;
672 tuple->dst_ipv4 == ip_hdr(skb)->daddr) 643 if (tuple->etype != fk->basic.n_proto)
673 return true; 644 return false;
674 if (tuple->etype == htons(ETH_P_IPV6) && 645 if (tuple->etype == htons(ETH_P_IP))
675 (!memcmp(&tuple->src_ipv6, &ipv6_hdr(skb)->saddr, 646 return tuple->src_ipv4 == fk->addrs.v4addrs.src &&
676 sizeof(struct in6_addr))) && 647 tuple->dst_ipv4 == fk->addrs.v4addrs.dst;
677 (!memcmp(&tuple->dst_ipv6, &ipv6_hdr(skb)->daddr, 648 if (tuple->etype == htons(ETH_P_IPV6))
678 sizeof(struct in6_addr)))) 649 return !memcmp(&tuple->src_ipv6, &fk->addrs.v6addrs.src,
679 return true; 650 sizeof(struct in6_addr)) &&
651 !memcmp(&tuple->dst_ipv6, &fk->addrs.v6addrs.dst,
652 sizeof(struct in6_addr));
680 return false; 653 return false;
681} 654}
682 655
683static struct arfs_rule *arfs_find_rule(struct arfs_table *arfs_t, 656static struct arfs_rule *arfs_find_rule(struct arfs_table *arfs_t,
684 const struct sk_buff *skb) 657 const struct flow_keys *fk)
685{ 658{
686 struct arfs_rule *arfs_rule; 659 struct arfs_rule *arfs_rule;
687 struct hlist_head *head; 660 struct hlist_head *head;
688 __be16 src_port = arfs_get_src_port(skb);
689 __be16 dst_port = arfs_get_dst_port(skb);
690 661
691 head = arfs_hash_bucket(arfs_t, src_port, dst_port); 662 head = arfs_hash_bucket(arfs_t, fk->ports.src, fk->ports.dst);
692 hlist_for_each_entry(arfs_rule, head, hlist) { 663 hlist_for_each_entry(arfs_rule, head, hlist) {
693 if (arfs_rule->tuple.src_port == src_port && 664 if (arfs_cmp(&arfs_rule->tuple, fk))
694 arfs_rule->tuple.dst_port == dst_port &&
695 arfs_cmp_ips(&arfs_rule->tuple, skb)) {
696 return arfs_rule; 665 return arfs_rule;
697 }
698 } 666 }
699 667
700 return NULL; 668 return NULL;
@@ -707,20 +675,24 @@ int mlx5e_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
707 struct mlx5e_arfs_tables *arfs = &priv->fs.arfs; 675 struct mlx5e_arfs_tables *arfs = &priv->fs.arfs;
708 struct arfs_table *arfs_t; 676 struct arfs_table *arfs_t;
709 struct arfs_rule *arfs_rule; 677 struct arfs_rule *arfs_rule;
678 struct flow_keys fk;
679
680 if (!skb_flow_dissect_flow_keys(skb, &fk, 0))
681 return -EPROTONOSUPPORT;
710 682
711 if (skb->protocol != htons(ETH_P_IP) && 683 if (fk.basic.n_proto != htons(ETH_P_IP) &&
712 skb->protocol != htons(ETH_P_IPV6)) 684 fk.basic.n_proto != htons(ETH_P_IPV6))
713 return -EPROTONOSUPPORT; 685 return -EPROTONOSUPPORT;
714 686
715 if (skb->encapsulation) 687 if (skb->encapsulation)
716 return -EPROTONOSUPPORT; 688 return -EPROTONOSUPPORT;
717 689
718 arfs_t = arfs_get_table(arfs, arfs_get_ip_proto(skb), skb->protocol); 690 arfs_t = arfs_get_table(arfs, fk.basic.ip_proto, fk.basic.n_proto);
719 if (!arfs_t) 691 if (!arfs_t)
720 return -EPROTONOSUPPORT; 692 return -EPROTONOSUPPORT;
721 693
722 spin_lock_bh(&arfs->arfs_lock); 694 spin_lock_bh(&arfs->arfs_lock);
723 arfs_rule = arfs_find_rule(arfs_t, skb); 695 arfs_rule = arfs_find_rule(arfs_t, &fk);
724 if (arfs_rule) { 696 if (arfs_rule) {
725 if (arfs_rule->rxq == rxq_index) { 697 if (arfs_rule->rxq == rxq_index) {
726 spin_unlock_bh(&arfs->arfs_lock); 698 spin_unlock_bh(&arfs->arfs_lock);
@@ -728,8 +700,7 @@ int mlx5e_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
728 } 700 }
729 arfs_rule->rxq = rxq_index; 701 arfs_rule->rxq = rxq_index;
730 } else { 702 } else {
731 arfs_rule = arfs_alloc_rule(priv, arfs_t, skb, 703 arfs_rule = arfs_alloc_rule(priv, arfs_t, &fk, rxq_index, flow_id);
732 rxq_index, flow_id);
733 if (!arfs_rule) { 704 if (!arfs_rule) {
734 spin_unlock_bh(&arfs->arfs_lock); 705 spin_unlock_bh(&arfs->arfs_lock);
735 return -ENOMEM; 706 return -ENOMEM;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
index 126ec4181286..20e628c907e5 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
@@ -391,7 +391,7 @@ void mlx5e_ethtool_get_channels(struct mlx5e_priv *priv,
391{ 391{
392 mutex_lock(&priv->state_lock); 392 mutex_lock(&priv->state_lock);
393 393
394 ch->max_combined = mlx5e_get_netdev_max_channels(priv->netdev); 394 ch->max_combined = priv->max_nch;
395 ch->combined_count = priv->channels.params.num_channels; 395 ch->combined_count = priv->channels.params.num_channels;
396 if (priv->xsk.refcnt) { 396 if (priv->xsk.refcnt) {
397 /* The upper half are XSK queues. */ 397 /* The upper half are XSK queues. */
@@ -785,7 +785,7 @@ static void ptys2ethtool_supported_advertised_port(struct ethtool_link_ksettings
785} 785}
786 786
787static void get_speed_duplex(struct net_device *netdev, 787static void get_speed_duplex(struct net_device *netdev,
788 u32 eth_proto_oper, 788 u32 eth_proto_oper, bool force_legacy,
789 struct ethtool_link_ksettings *link_ksettings) 789 struct ethtool_link_ksettings *link_ksettings)
790{ 790{
791 struct mlx5e_priv *priv = netdev_priv(netdev); 791 struct mlx5e_priv *priv = netdev_priv(netdev);
@@ -795,7 +795,7 @@ static void get_speed_duplex(struct net_device *netdev,
795 if (!netif_carrier_ok(netdev)) 795 if (!netif_carrier_ok(netdev))
796 goto out; 796 goto out;
797 797
798 speed = mlx5e_port_ptys2speed(priv->mdev, eth_proto_oper); 798 speed = mlx5e_port_ptys2speed(priv->mdev, eth_proto_oper, force_legacy);
799 if (!speed) { 799 if (!speed) {
800 speed = SPEED_UNKNOWN; 800 speed = SPEED_UNKNOWN;
801 goto out; 801 goto out;
@@ -914,8 +914,8 @@ int mlx5e_ethtool_get_link_ksettings(struct mlx5e_priv *priv,
914 /* Fields: eth_proto_admin and ext_eth_proto_admin are 914 /* Fields: eth_proto_admin and ext_eth_proto_admin are
915 * mutually exclusive. Hence try reading legacy advertising 915 * mutually exclusive. Hence try reading legacy advertising
916 * when extended advertising is zero. 916 * when extended advertising is zero.
917 * admin_ext indicates how eth_proto_admin should be 917 * admin_ext indicates which proto_admin (ext vs. legacy)
918 * interpreted 918 * should be read and interpreted
919 */ 919 */
920 admin_ext = ext; 920 admin_ext = ext;
921 if (ext && !eth_proto_admin) { 921 if (ext && !eth_proto_admin) {
@@ -924,7 +924,7 @@ int mlx5e_ethtool_get_link_ksettings(struct mlx5e_priv *priv,
924 admin_ext = false; 924 admin_ext = false;
925 } 925 }
926 926
927 eth_proto_oper = MLX5_GET_ETH_PROTO(ptys_reg, out, ext, 927 eth_proto_oper = MLX5_GET_ETH_PROTO(ptys_reg, out, admin_ext,
928 eth_proto_oper); 928 eth_proto_oper);
929 eth_proto_lp = MLX5_GET(ptys_reg, out, eth_proto_lp_advertise); 929 eth_proto_lp = MLX5_GET(ptys_reg, out, eth_proto_lp_advertise);
930 an_disable_admin = MLX5_GET(ptys_reg, out, an_disable_admin); 930 an_disable_admin = MLX5_GET(ptys_reg, out, an_disable_admin);
@@ -939,7 +939,8 @@ int mlx5e_ethtool_get_link_ksettings(struct mlx5e_priv *priv,
939 get_supported(mdev, eth_proto_cap, link_ksettings); 939 get_supported(mdev, eth_proto_cap, link_ksettings);
940 get_advertising(eth_proto_admin, tx_pause, rx_pause, link_ksettings, 940 get_advertising(eth_proto_admin, tx_pause, rx_pause, link_ksettings,
941 admin_ext); 941 admin_ext);
942 get_speed_duplex(priv->netdev, eth_proto_oper, link_ksettings); 942 get_speed_duplex(priv->netdev, eth_proto_oper, !admin_ext,
943 link_ksettings);
943 944
944 eth_proto_oper = eth_proto_oper ? eth_proto_oper : eth_proto_cap; 945 eth_proto_oper = eth_proto_oper ? eth_proto_oper : eth_proto_cap;
945 946
@@ -1016,45 +1017,77 @@ static u32 mlx5e_ethtool2ptys_ext_adver_link(const unsigned long *link_modes)
1016 return ptys_modes; 1017 return ptys_modes;
1017} 1018}
1018 1019
1020static bool ext_link_mode_requested(const unsigned long *adver)
1021{
1022#define MLX5E_MIN_PTYS_EXT_LINK_MODE_BIT ETHTOOL_LINK_MODE_50000baseKR_Full_BIT
1023 int size = __ETHTOOL_LINK_MODE_MASK_NBITS - MLX5E_MIN_PTYS_EXT_LINK_MODE_BIT;
1024 __ETHTOOL_DECLARE_LINK_MODE_MASK(modes);
1025
1026 bitmap_set(modes, MLX5E_MIN_PTYS_EXT_LINK_MODE_BIT, size);
1027 return bitmap_intersects(modes, adver, __ETHTOOL_LINK_MODE_MASK_NBITS);
1028}
1029
1030static bool ext_speed_requested(u32 speed)
1031{
1032#define MLX5E_MAX_PTYS_LEGACY_SPEED 100000
1033 return !!(speed > MLX5E_MAX_PTYS_LEGACY_SPEED);
1034}
1035
1036static bool ext_requested(u8 autoneg, const unsigned long *adver, u32 speed)
1037{
1038 bool ext_link_mode = ext_link_mode_requested(adver);
1039 bool ext_speed = ext_speed_requested(speed);
1040
1041 return autoneg == AUTONEG_ENABLE ? ext_link_mode : ext_speed;
1042}
1043
1019int mlx5e_ethtool_set_link_ksettings(struct mlx5e_priv *priv, 1044int mlx5e_ethtool_set_link_ksettings(struct mlx5e_priv *priv,
1020 const struct ethtool_link_ksettings *link_ksettings) 1045 const struct ethtool_link_ksettings *link_ksettings)
1021{ 1046{
1022 struct mlx5_core_dev *mdev = priv->mdev; 1047 struct mlx5_core_dev *mdev = priv->mdev;
1023 struct mlx5e_port_eth_proto eproto; 1048 struct mlx5e_port_eth_proto eproto;
1049 const unsigned long *adver;
1024 bool an_changes = false; 1050 bool an_changes = false;
1025 u8 an_disable_admin; 1051 u8 an_disable_admin;
1026 bool ext_supported; 1052 bool ext_supported;
1027 bool ext_requested;
1028 u8 an_disable_cap; 1053 u8 an_disable_cap;
1029 bool an_disable; 1054 bool an_disable;
1030 u32 link_modes; 1055 u32 link_modes;
1031 u8 an_status; 1056 u8 an_status;
1057 u8 autoneg;
1032 u32 speed; 1058 u32 speed;
1059 bool ext;
1033 int err; 1060 int err;
1034 1061
1035 u32 (*ethtool2ptys_adver_func)(const unsigned long *adver); 1062 u32 (*ethtool2ptys_adver_func)(const unsigned long *adver);
1036 1063
1037#define MLX5E_PTYS_EXT ((1ULL << ETHTOOL_LINK_MODE_50000baseKR_Full_BIT) - 1) 1064 adver = link_ksettings->link_modes.advertising;
1065 autoneg = link_ksettings->base.autoneg;
1066 speed = link_ksettings->base.speed;
1038 1067
1039 ext_requested = !!(link_ksettings->link_modes.advertising[0] > 1068 ext = ext_requested(autoneg, adver, speed),
1040 MLX5E_PTYS_EXT ||
1041 link_ksettings->link_modes.advertising[1]);
1042 ext_supported = MLX5_CAP_PCAM_FEATURE(mdev, ptys_extended_ethernet); 1069 ext_supported = MLX5_CAP_PCAM_FEATURE(mdev, ptys_extended_ethernet);
1043 ext_requested &= ext_supported; 1070 if (!ext_supported && ext)
1071 return -EOPNOTSUPP;
1044 1072
1045 speed = link_ksettings->base.speed; 1073 ethtool2ptys_adver_func = ext ? mlx5e_ethtool2ptys_ext_adver_link :
1046 ethtool2ptys_adver_func = ext_requested ?
1047 mlx5e_ethtool2ptys_ext_adver_link :
1048 mlx5e_ethtool2ptys_adver_link; 1074 mlx5e_ethtool2ptys_adver_link;
1049 err = mlx5_port_query_eth_proto(mdev, 1, ext_requested, &eproto); 1075 err = mlx5_port_query_eth_proto(mdev, 1, ext, &eproto);
1050 if (err) { 1076 if (err) {
1051 netdev_err(priv->netdev, "%s: query port eth proto failed: %d\n", 1077 netdev_err(priv->netdev, "%s: query port eth proto failed: %d\n",
1052 __func__, err); 1078 __func__, err);
1053 goto out; 1079 goto out;
1054 } 1080 }
1055 link_modes = link_ksettings->base.autoneg == AUTONEG_ENABLE ? 1081 link_modes = autoneg == AUTONEG_ENABLE ? ethtool2ptys_adver_func(adver) :
1056 ethtool2ptys_adver_func(link_ksettings->link_modes.advertising) : 1082 mlx5e_port_speed2linkmodes(mdev, speed, !ext);
1057 mlx5e_port_speed2linkmodes(mdev, speed); 1083
1084 if ((link_modes & MLX5E_PROT_MASK(MLX5E_56GBASE_R4)) &&
1085 autoneg != AUTONEG_ENABLE) {
1086 netdev_err(priv->netdev, "%s: 56G link speed requires autoneg enabled\n",
1087 __func__);
1088 err = -EINVAL;
1089 goto out;
1090 }
1058 1091
1059 link_modes = link_modes & eproto.cap; 1092 link_modes = link_modes & eproto.cap;
1060 if (!link_modes) { 1093 if (!link_modes) {
@@ -1067,14 +1100,14 @@ int mlx5e_ethtool_set_link_ksettings(struct mlx5e_priv *priv,
1067 mlx5_port_query_eth_autoneg(mdev, &an_status, &an_disable_cap, 1100 mlx5_port_query_eth_autoneg(mdev, &an_status, &an_disable_cap,
1068 &an_disable_admin); 1101 &an_disable_admin);
1069 1102
1070 an_disable = link_ksettings->base.autoneg == AUTONEG_DISABLE; 1103 an_disable = autoneg == AUTONEG_DISABLE;
1071 an_changes = ((!an_disable && an_disable_admin) || 1104 an_changes = ((!an_disable && an_disable_admin) ||
1072 (an_disable && !an_disable_admin)); 1105 (an_disable && !an_disable_admin));
1073 1106
1074 if (!an_changes && link_modes == eproto.admin) 1107 if (!an_changes && link_modes == eproto.admin)
1075 goto out; 1108 goto out;
1076 1109
1077 mlx5_port_set_eth_ptys(mdev, an_disable, link_modes, ext_requested); 1110 mlx5_port_set_eth_ptys(mdev, an_disable, link_modes, ext);
1078 mlx5_toggle_port_link(mdev); 1111 mlx5_toggle_port_link(mdev);
1079 1112
1080out: 1113out:
@@ -1313,6 +1346,9 @@ int mlx5e_ethtool_set_pauseparam(struct mlx5e_priv *priv,
1313 struct mlx5_core_dev *mdev = priv->mdev; 1346 struct mlx5_core_dev *mdev = priv->mdev;
1314 int err; 1347 int err;
1315 1348
1349 if (!MLX5_CAP_GEN(mdev, vport_group_manager))
1350 return -EOPNOTSUPP;
1351
1316 if (pauseparam->autoneg) 1352 if (pauseparam->autoneg)
1317 return -EINVAL; 1353 return -EINVAL;
1318 1354
@@ -1654,6 +1690,40 @@ static int mlx5e_get_module_eeprom(struct net_device *netdev,
1654 return 0; 1690 return 0;
1655} 1691}
1656 1692
1693int mlx5e_ethtool_flash_device(struct mlx5e_priv *priv,
1694 struct ethtool_flash *flash)
1695{
1696 struct mlx5_core_dev *mdev = priv->mdev;
1697 struct net_device *dev = priv->netdev;
1698 const struct firmware *fw;
1699 int err;
1700
1701 if (flash->region != ETHTOOL_FLASH_ALL_REGIONS)
1702 return -EOPNOTSUPP;
1703
1704 err = request_firmware_direct(&fw, flash->data, &dev->dev);
1705 if (err)
1706 return err;
1707
1708 dev_hold(dev);
1709 rtnl_unlock();
1710
1711 err = mlx5_firmware_flash(mdev, fw, NULL);
1712 release_firmware(fw);
1713
1714 rtnl_lock();
1715 dev_put(dev);
1716 return err;
1717}
1718
1719static int mlx5e_flash_device(struct net_device *dev,
1720 struct ethtool_flash *flash)
1721{
1722 struct mlx5e_priv *priv = netdev_priv(dev);
1723
1724 return mlx5e_ethtool_flash_device(priv, flash);
1725}
1726
1657static int set_pflag_cqe_based_moder(struct net_device *netdev, bool enable, 1727static int set_pflag_cqe_based_moder(struct net_device *netdev, bool enable,
1658 bool is_rx_cq) 1728 bool is_rx_cq)
1659{ 1729{
@@ -1936,6 +2006,7 @@ const struct ethtool_ops mlx5e_ethtool_ops = {
1936 .set_wol = mlx5e_set_wol, 2006 .set_wol = mlx5e_set_wol,
1937 .get_module_info = mlx5e_get_module_info, 2007 .get_module_info = mlx5e_get_module_info,
1938 .get_module_eeprom = mlx5e_get_module_eeprom, 2008 .get_module_eeprom = mlx5e_get_module_eeprom,
2009 .flash_device = mlx5e_flash_device,
1939 .get_priv_flags = mlx5e_get_priv_flags, 2010 .get_priv_flags = mlx5e_get_priv_flags,
1940 .set_priv_flags = mlx5e_set_priv_flags, 2011 .set_priv_flags = mlx5e_set_priv_flags,
1941 .self_test = mlx5e_self_test, 2012 .self_test = mlx5e_self_test,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c
index ea3a490b569a..94304abc49e9 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c
@@ -611,7 +611,8 @@ static int validate_flow(struct mlx5e_priv *priv,
611 return -ENOSPC; 611 return -ENOSPC;
612 612
613 if (fs->ring_cookie != RX_CLS_FLOW_DISC) 613 if (fs->ring_cookie != RX_CLS_FLOW_DISC)
614 if (!mlx5e_qid_validate(&priv->channels.params, fs->ring_cookie)) 614 if (!mlx5e_qid_validate(priv->profile, &priv->channels.params,
615 fs->ring_cookie))
615 return -EINVAL; 616 return -EINVAL;
616 617
617 switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) { 618 switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index 47eea6b3a1c3..9d5f6e56188f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -331,12 +331,11 @@ static inline u64 mlx5e_get_mpwqe_offset(struct mlx5e_rq *rq, u16 wqe_ix)
331 331
332static void mlx5e_init_frags_partition(struct mlx5e_rq *rq) 332static void mlx5e_init_frags_partition(struct mlx5e_rq *rq)
333{ 333{
334 struct mlx5e_wqe_frag_info next_frag, *prev; 334 struct mlx5e_wqe_frag_info next_frag = {};
335 struct mlx5e_wqe_frag_info *prev = NULL;
335 int i; 336 int i;
336 337
337 next_frag.di = &rq->wqe.di[0]; 338 next_frag.di = &rq->wqe.di[0];
338 next_frag.offset = 0;
339 prev = NULL;
340 339
341 for (i = 0; i < mlx5_wq_cyc_get_size(&rq->wqe.wq); i++) { 340 for (i = 0; i < mlx5_wq_cyc_get_size(&rq->wqe.wq); i++) {
342 struct mlx5e_rq_frag_info *frag_info = &rq->wqe.info.arr[0]; 341 struct mlx5e_rq_frag_info *frag_info = &rq->wqe.info.arr[0];
@@ -1322,7 +1321,6 @@ err_free_txqsq:
1322void mlx5e_activate_txqsq(struct mlx5e_txqsq *sq) 1321void mlx5e_activate_txqsq(struct mlx5e_txqsq *sq)
1323{ 1322{
1324 sq->txq = netdev_get_tx_queue(sq->channel->netdev, sq->txq_ix); 1323 sq->txq = netdev_get_tx_queue(sq->channel->netdev, sq->txq_ix);
1325 clear_bit(MLX5E_SQ_STATE_RECOVERING, &sq->state);
1326 set_bit(MLX5E_SQ_STATE_ENABLED, &sq->state); 1324 set_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
1327 netdev_tx_reset_queue(sq->txq); 1325 netdev_tx_reset_queue(sq->txq);
1328 netif_tx_start_queue(sq->txq); 1326 netif_tx_start_queue(sq->txq);
@@ -1677,10 +1675,10 @@ static int mlx5e_open_sqs(struct mlx5e_channel *c,
1677 struct mlx5e_channel_param *cparam) 1675 struct mlx5e_channel_param *cparam)
1678{ 1676{
1679 struct mlx5e_priv *priv = c->priv; 1677 struct mlx5e_priv *priv = c->priv;
1680 int err, tc, max_nch = mlx5e_get_netdev_max_channels(priv->netdev); 1678 int err, tc;
1681 1679
1682 for (tc = 0; tc < params->num_tc; tc++) { 1680 for (tc = 0; tc < params->num_tc; tc++) {
1683 int txq_ix = c->ix + tc * max_nch; 1681 int txq_ix = c->ix + tc * priv->max_nch;
1684 1682
1685 err = mlx5e_open_txqsq(c, c->priv->tisn[tc], txq_ix, 1683 err = mlx5e_open_txqsq(c, c->priv->tisn[tc], txq_ix,
1686 params, &cparam->sq, &c->sq[tc], tc); 1684 params, &cparam->sq, &c->sq[tc], tc);
@@ -2438,11 +2436,10 @@ int mlx5e_create_indirect_rqt(struct mlx5e_priv *priv)
2438 2436
2439int mlx5e_create_direct_rqts(struct mlx5e_priv *priv, struct mlx5e_tir *tirs) 2437int mlx5e_create_direct_rqts(struct mlx5e_priv *priv, struct mlx5e_tir *tirs)
2440{ 2438{
2441 const int max_nch = mlx5e_get_netdev_max_channels(priv->netdev);
2442 int err; 2439 int err;
2443 int ix; 2440 int ix;
2444 2441
2445 for (ix = 0; ix < max_nch; ix++) { 2442 for (ix = 0; ix < priv->max_nch; ix++) {
2446 err = mlx5e_create_rqt(priv, 1 /*size */, &tirs[ix].rqt); 2443 err = mlx5e_create_rqt(priv, 1 /*size */, &tirs[ix].rqt);
2447 if (unlikely(err)) 2444 if (unlikely(err))
2448 goto err_destroy_rqts; 2445 goto err_destroy_rqts;
@@ -2460,10 +2457,9 @@ err_destroy_rqts:
2460 2457
2461void mlx5e_destroy_direct_rqts(struct mlx5e_priv *priv, struct mlx5e_tir *tirs) 2458void mlx5e_destroy_direct_rqts(struct mlx5e_priv *priv, struct mlx5e_tir *tirs)
2462{ 2459{
2463 const int max_nch = mlx5e_get_netdev_max_channels(priv->netdev);
2464 int i; 2460 int i;
2465 2461
2466 for (i = 0; i < max_nch; i++) 2462 for (i = 0; i < priv->max_nch; i++)
2467 mlx5e_destroy_rqt(priv, &tirs[i].rqt); 2463 mlx5e_destroy_rqt(priv, &tirs[i].rqt);
2468} 2464}
2469 2465
@@ -2557,7 +2553,7 @@ static void mlx5e_redirect_rqts(struct mlx5e_priv *priv,
2557 mlx5e_redirect_rqt(priv, rqtn, MLX5E_INDIR_RQT_SIZE, rrp); 2553 mlx5e_redirect_rqt(priv, rqtn, MLX5E_INDIR_RQT_SIZE, rrp);
2558 } 2554 }
2559 2555
2560 for (ix = 0; ix < mlx5e_get_netdev_max_channels(priv->netdev); ix++) { 2556 for (ix = 0; ix < priv->max_nch; ix++) {
2561 struct mlx5e_redirect_rqt_param direct_rrp = { 2557 struct mlx5e_redirect_rqt_param direct_rrp = {
2562 .is_rss = false, 2558 .is_rss = false,
2563 { 2559 {
@@ -2758,7 +2754,7 @@ static int mlx5e_modify_tirs_lro(struct mlx5e_priv *priv)
2758 goto free_in; 2754 goto free_in;
2759 } 2755 }
2760 2756
2761 for (ix = 0; ix < mlx5e_get_netdev_max_channels(priv->netdev); ix++) { 2757 for (ix = 0; ix < priv->max_nch; ix++) {
2762 err = mlx5_core_modify_tir(mdev, priv->direct_tir[ix].tirn, 2758 err = mlx5_core_modify_tir(mdev, priv->direct_tir[ix].tirn,
2763 in, inlen); 2759 in, inlen);
2764 if (err) 2760 if (err)
@@ -2858,12 +2854,11 @@ static void mlx5e_netdev_set_tcs(struct net_device *netdev)
2858 2854
2859static void mlx5e_build_tc2txq_maps(struct mlx5e_priv *priv) 2855static void mlx5e_build_tc2txq_maps(struct mlx5e_priv *priv)
2860{ 2856{
2861 int max_nch = mlx5e_get_netdev_max_channels(priv->netdev);
2862 int i, tc; 2857 int i, tc;
2863 2858
2864 for (i = 0; i < max_nch; i++) 2859 for (i = 0; i < priv->max_nch; i++)
2865 for (tc = 0; tc < priv->profile->max_tc; tc++) 2860 for (tc = 0; tc < priv->profile->max_tc; tc++)
2866 priv->channel_tc2txq[i][tc] = i + tc * max_nch; 2861 priv->channel_tc2txq[i][tc] = i + tc * priv->max_nch;
2867} 2862}
2868 2863
2869static void mlx5e_build_tx2sq_maps(struct mlx5e_priv *priv) 2864static void mlx5e_build_tx2sq_maps(struct mlx5e_priv *priv)
@@ -2884,7 +2879,7 @@ static void mlx5e_build_tx2sq_maps(struct mlx5e_priv *priv)
2884void mlx5e_activate_priv_channels(struct mlx5e_priv *priv) 2879void mlx5e_activate_priv_channels(struct mlx5e_priv *priv)
2885{ 2880{
2886 int num_txqs = priv->channels.num * priv->channels.params.num_tc; 2881 int num_txqs = priv->channels.num * priv->channels.params.num_tc;
2887 int num_rxqs = priv->channels.num * MLX5E_NUM_RQ_GROUPS; 2882 int num_rxqs = priv->channels.num * priv->profile->rq_groups;
2888 struct net_device *netdev = priv->netdev; 2883 struct net_device *netdev = priv->netdev;
2889 2884
2890 mlx5e_netdev_set_tcs(netdev); 2885 mlx5e_netdev_set_tcs(netdev);
@@ -3306,7 +3301,6 @@ err_destroy_inner_tirs:
3306 3301
3307int mlx5e_create_direct_tirs(struct mlx5e_priv *priv, struct mlx5e_tir *tirs) 3302int mlx5e_create_direct_tirs(struct mlx5e_priv *priv, struct mlx5e_tir *tirs)
3308{ 3303{
3309 const int max_nch = mlx5e_get_netdev_max_channels(priv->netdev);
3310 struct mlx5e_tir *tir; 3304 struct mlx5e_tir *tir;
3311 void *tirc; 3305 void *tirc;
3312 int inlen; 3306 int inlen;
@@ -3319,7 +3313,7 @@ int mlx5e_create_direct_tirs(struct mlx5e_priv *priv, struct mlx5e_tir *tirs)
3319 if (!in) 3313 if (!in)
3320 return -ENOMEM; 3314 return -ENOMEM;
3321 3315
3322 for (ix = 0; ix < max_nch; ix++) { 3316 for (ix = 0; ix < priv->max_nch; ix++) {
3323 memset(in, 0, inlen); 3317 memset(in, 0, inlen);
3324 tir = &tirs[ix]; 3318 tir = &tirs[ix];
3325 tirc = MLX5_ADDR_OF(create_tir_in, in, ctx); 3319 tirc = MLX5_ADDR_OF(create_tir_in, in, ctx);
@@ -3358,10 +3352,9 @@ void mlx5e_destroy_indirect_tirs(struct mlx5e_priv *priv, bool inner_ttc)
3358 3352
3359void mlx5e_destroy_direct_tirs(struct mlx5e_priv *priv, struct mlx5e_tir *tirs) 3353void mlx5e_destroy_direct_tirs(struct mlx5e_priv *priv, struct mlx5e_tir *tirs)
3360{ 3354{
3361 const int max_nch = mlx5e_get_netdev_max_channels(priv->netdev);
3362 int i; 3355 int i;
3363 3356
3364 for (i = 0; i < max_nch; i++) 3357 for (i = 0; i < priv->max_nch; i++)
3365 mlx5e_destroy_tir(priv->mdev, &tirs[i]); 3358 mlx5e_destroy_tir(priv->mdev, &tirs[i]);
3366} 3359}
3367 3360
@@ -3487,7 +3480,7 @@ void mlx5e_fold_sw_stats64(struct mlx5e_priv *priv, struct rtnl_link_stats64 *s)
3487{ 3480{
3488 int i; 3481 int i;
3489 3482
3490 for (i = 0; i < mlx5e_get_netdev_max_channels(priv->netdev); i++) { 3483 for (i = 0; i < priv->max_nch; i++) {
3491 struct mlx5e_channel_stats *channel_stats = &priv->channel_stats[i]; 3484 struct mlx5e_channel_stats *channel_stats = &priv->channel_stats[i];
3492 struct mlx5e_rq_stats *xskrq_stats = &channel_stats->xskrq; 3485 struct mlx5e_rq_stats *xskrq_stats = &channel_stats->xskrq;
3493 struct mlx5e_rq_stats *rq_stats = &channel_stats->rq; 3486 struct mlx5e_rq_stats *rq_stats = &channel_stats->rq;
@@ -4960,8 +4953,7 @@ static int mlx5e_nic_init(struct mlx5_core_dev *mdev,
4960 return err; 4953 return err;
4961 4954
4962 mlx5e_build_nic_params(mdev, &priv->xsk, rss, &priv->channels.params, 4955 mlx5e_build_nic_params(mdev, &priv->xsk, rss, &priv->channels.params,
4963 mlx5e_get_netdev_max_channels(netdev), 4956 priv->max_nch, netdev->mtu);
4964 netdev->mtu);
4965 4957
4966 mlx5e_timestamp_init(priv); 4958 mlx5e_timestamp_init(priv);
4967 4959
@@ -5164,6 +5156,7 @@ static const struct mlx5e_profile mlx5e_nic_profile = {
5164 .rx_handlers.handle_rx_cqe = mlx5e_handle_rx_cqe, 5156 .rx_handlers.handle_rx_cqe = mlx5e_handle_rx_cqe,
5165 .rx_handlers.handle_rx_cqe_mpwqe = mlx5e_handle_rx_cqe_mpwrq, 5157 .rx_handlers.handle_rx_cqe_mpwqe = mlx5e_handle_rx_cqe_mpwrq,
5166 .max_tc = MLX5E_MAX_NUM_TC, 5158 .max_tc = MLX5E_MAX_NUM_TC,
5159 .rq_groups = MLX5E_NUM_RQ_GROUPS(XSK),
5167}; 5160};
5168 5161
5169/* mlx5e generic netdev management API (move to en_common.c) */ 5162/* mlx5e generic netdev management API (move to en_common.c) */
@@ -5181,6 +5174,7 @@ int mlx5e_netdev_init(struct net_device *netdev,
5181 priv->profile = profile; 5174 priv->profile = profile;
5182 priv->ppriv = ppriv; 5175 priv->ppriv = ppriv;
5183 priv->msglevel = MLX5E_MSG_LEVEL; 5176 priv->msglevel = MLX5E_MSG_LEVEL;
5177 priv->max_nch = netdev->num_rx_queues / max_t(u8, profile->rq_groups, 1);
5184 priv->max_opened_tc = 1; 5178 priv->max_opened_tc = 1;
5185 5179
5186 mutex_init(&priv->state_lock); 5180 mutex_init(&priv->state_lock);
@@ -5218,7 +5212,7 @@ struct net_device *mlx5e_create_netdev(struct mlx5_core_dev *mdev,
5218 5212
5219 netdev = alloc_etherdev_mqs(sizeof(struct mlx5e_priv), 5213 netdev = alloc_etherdev_mqs(sizeof(struct mlx5e_priv),
5220 nch * profile->max_tc, 5214 nch * profile->max_tc,
5221 nch * MLX5E_NUM_RQ_GROUPS); 5215 nch * profile->rq_groups);
5222 if (!netdev) { 5216 if (!netdev) {
5223 mlx5_core_err(mdev, "alloc_etherdev_mqs() failed\n"); 5217 mlx5_core_err(mdev, "alloc_etherdev_mqs() failed\n");
5224 return NULL; 5218 return NULL;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
index 7245d287633d..d0684fdb69e1 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
@@ -735,8 +735,7 @@ mlx5e_rep_indr_setup_tc_block(struct net_device *netdev,
735 list_add(&indr_priv->list, 735 list_add(&indr_priv->list,
736 &rpriv->uplink_priv.tc_indr_block_priv_list); 736 &rpriv->uplink_priv.tc_indr_block_priv_list);
737 737
738 block_cb = flow_block_cb_alloc(f->net, 738 block_cb = flow_block_cb_alloc(mlx5e_rep_indr_setup_block_cb,
739 mlx5e_rep_indr_setup_block_cb,
740 indr_priv, indr_priv, 739 indr_priv, indr_priv,
741 mlx5e_rep_indr_tc_block_unbind); 740 mlx5e_rep_indr_tc_block_unbind);
742 if (IS_ERR(block_cb)) { 741 if (IS_ERR(block_cb)) {
@@ -753,7 +752,7 @@ mlx5e_rep_indr_setup_tc_block(struct net_device *netdev,
753 if (!indr_priv) 752 if (!indr_priv)
754 return -ENOENT; 753 return -ENOENT;
755 754
756 block_cb = flow_block_cb_lookup(f, 755 block_cb = flow_block_cb_lookup(f->block,
757 mlx5e_rep_indr_setup_block_cb, 756 mlx5e_rep_indr_setup_block_cb,
758 indr_priv); 757 indr_priv);
759 if (!block_cb) 758 if (!block_cb)
@@ -1702,6 +1701,7 @@ static const struct mlx5e_profile mlx5e_rep_profile = {
1702 .rx_handlers.handle_rx_cqe = mlx5e_handle_rx_cqe_rep, 1701 .rx_handlers.handle_rx_cqe = mlx5e_handle_rx_cqe_rep,
1703 .rx_handlers.handle_rx_cqe_mpwqe = mlx5e_handle_rx_cqe_mpwrq, 1702 .rx_handlers.handle_rx_cqe_mpwqe = mlx5e_handle_rx_cqe_mpwrq,
1704 .max_tc = 1, 1703 .max_tc = 1,
1704 .rq_groups = MLX5E_NUM_RQ_GROUPS(REGULAR),
1705}; 1705};
1706 1706
1707static const struct mlx5e_profile mlx5e_uplink_rep_profile = { 1707static const struct mlx5e_profile mlx5e_uplink_rep_profile = {
@@ -1719,6 +1719,7 @@ static const struct mlx5e_profile mlx5e_uplink_rep_profile = {
1719 .rx_handlers.handle_rx_cqe = mlx5e_handle_rx_cqe_rep, 1719 .rx_handlers.handle_rx_cqe = mlx5e_handle_rx_cqe_rep,
1720 .rx_handlers.handle_rx_cqe_mpwqe = mlx5e_handle_rx_cqe_mpwrq, 1720 .rx_handlers.handle_rx_cqe_mpwqe = mlx5e_handle_rx_cqe_mpwrq,
1721 .max_tc = MLX5E_MAX_NUM_TC, 1721 .max_tc = MLX5E_MAX_NUM_TC,
1722 .rq_groups = MLX5E_NUM_RQ_GROUPS(REGULAR),
1722}; 1723};
1723 1724
1724static bool 1725static bool
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
index 539b4d3656da..57f9f346d213 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
@@ -172,7 +172,7 @@ static void mlx5e_grp_sw_update_stats(struct mlx5e_priv *priv)
172 172
173 memset(s, 0, sizeof(*s)); 173 memset(s, 0, sizeof(*s));
174 174
175 for (i = 0; i < mlx5e_get_netdev_max_channels(priv->netdev); i++) { 175 for (i = 0; i < priv->max_nch; i++) {
176 struct mlx5e_channel_stats *channel_stats = 176 struct mlx5e_channel_stats *channel_stats =
177 &priv->channel_stats[i]; 177 &priv->channel_stats[i];
178 struct mlx5e_xdpsq_stats *xdpsq_red_stats = &channel_stats->xdpsq; 178 struct mlx5e_xdpsq_stats *xdpsq_red_stats = &channel_stats->xdpsq;
@@ -1395,7 +1395,7 @@ static const struct counter_desc ch_stats_desc[] = {
1395 1395
1396static int mlx5e_grp_channels_get_num_stats(struct mlx5e_priv *priv) 1396static int mlx5e_grp_channels_get_num_stats(struct mlx5e_priv *priv)
1397{ 1397{
1398 int max_nch = mlx5e_get_netdev_max_channels(priv->netdev); 1398 int max_nch = priv->max_nch;
1399 1399
1400 return (NUM_RQ_STATS * max_nch) + 1400 return (NUM_RQ_STATS * max_nch) +
1401 (NUM_CH_STATS * max_nch) + 1401 (NUM_CH_STATS * max_nch) +
@@ -1409,8 +1409,8 @@ static int mlx5e_grp_channels_get_num_stats(struct mlx5e_priv *priv)
1409static int mlx5e_grp_channels_fill_strings(struct mlx5e_priv *priv, u8 *data, 1409static int mlx5e_grp_channels_fill_strings(struct mlx5e_priv *priv, u8 *data,
1410 int idx) 1410 int idx)
1411{ 1411{
1412 int max_nch = mlx5e_get_netdev_max_channels(priv->netdev);
1413 bool is_xsk = priv->xsk.ever_used; 1412 bool is_xsk = priv->xsk.ever_used;
1413 int max_nch = priv->max_nch;
1414 int i, j, tc; 1414 int i, j, tc;
1415 1415
1416 for (i = 0; i < max_nch; i++) 1416 for (i = 0; i < max_nch; i++)
@@ -1452,8 +1452,8 @@ static int mlx5e_grp_channels_fill_strings(struct mlx5e_priv *priv, u8 *data,
1452static int mlx5e_grp_channels_fill_stats(struct mlx5e_priv *priv, u64 *data, 1452static int mlx5e_grp_channels_fill_stats(struct mlx5e_priv *priv, u64 *data,
1453 int idx) 1453 int idx)
1454{ 1454{
1455 int max_nch = mlx5e_get_netdev_max_channels(priv->netdev);
1456 bool is_xsk = priv->xsk.ever_used; 1455 bool is_xsk = priv->xsk.ever_used;
1456 int max_nch = priv->max_nch;
1457 int i, j, tc; 1457 int i, j, tc;
1458 1458
1459 for (i = 0; i < max_nch; i++) 1459 for (i = 0; i < max_nch; i++)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
index cc096f6011d9..00b2d4a86159 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
@@ -1230,13 +1230,13 @@ static struct mlx5_fc *mlx5e_tc_get_counter(struct mlx5e_tc_flow *flow)
1230void mlx5e_tc_update_neigh_used_value(struct mlx5e_neigh_hash_entry *nhe) 1230void mlx5e_tc_update_neigh_used_value(struct mlx5e_neigh_hash_entry *nhe)
1231{ 1231{
1232 struct mlx5e_neigh *m_neigh = &nhe->m_neigh; 1232 struct mlx5e_neigh *m_neigh = &nhe->m_neigh;
1233 u64 bytes, packets, lastuse = 0;
1234 struct mlx5e_tc_flow *flow; 1233 struct mlx5e_tc_flow *flow;
1235 struct mlx5e_encap_entry *e; 1234 struct mlx5e_encap_entry *e;
1236 struct mlx5_fc *counter; 1235 struct mlx5_fc *counter;
1237 struct neigh_table *tbl; 1236 struct neigh_table *tbl;
1238 bool neigh_used = false; 1237 bool neigh_used = false;
1239 struct neighbour *n; 1238 struct neighbour *n;
1239 u64 lastuse;
1240 1240
1241 if (m_neigh->family == AF_INET) 1241 if (m_neigh->family == AF_INET)
1242 tbl = &arp_tbl; 1242 tbl = &arp_tbl;
@@ -1256,7 +1256,7 @@ void mlx5e_tc_update_neigh_used_value(struct mlx5e_neigh_hash_entry *nhe)
1256 encaps[efi->index]); 1256 encaps[efi->index]);
1257 if (flow->flags & MLX5E_TC_FLOW_OFFLOADED) { 1257 if (flow->flags & MLX5E_TC_FLOW_OFFLOADED) {
1258 counter = mlx5e_tc_get_counter(flow); 1258 counter = mlx5e_tc_get_counter(flow);
1259 mlx5_fc_query_cached(counter, &bytes, &packets, &lastuse); 1259 lastuse = mlx5_fc_query_lastuse(counter);
1260 if (time_after((unsigned long)lastuse, nhe->reported_lastuse)) { 1260 if (time_after((unsigned long)lastuse, nhe->reported_lastuse)) {
1261 neigh_used = true; 1261 neigh_used = true;
1262 break; 1262 break;
@@ -1480,7 +1480,7 @@ static int __parse_cls_flower(struct mlx5e_priv *priv,
1480 struct mlx5_flow_spec *spec, 1480 struct mlx5_flow_spec *spec,
1481 struct flow_cls_offload *f, 1481 struct flow_cls_offload *f,
1482 struct net_device *filter_dev, 1482 struct net_device *filter_dev,
1483 u8 *match_level, u8 *tunnel_match_level) 1483 u8 *inner_match_level, u8 *outer_match_level)
1484{ 1484{
1485 struct netlink_ext_ack *extack = f->common.extack; 1485 struct netlink_ext_ack *extack = f->common.extack;
1486 void *headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, 1486 void *headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
@@ -1495,8 +1495,9 @@ static int __parse_cls_flower(struct mlx5e_priv *priv,
1495 struct flow_dissector *dissector = rule->match.dissector; 1495 struct flow_dissector *dissector = rule->match.dissector;
1496 u16 addr_type = 0; 1496 u16 addr_type = 0;
1497 u8 ip_proto = 0; 1497 u8 ip_proto = 0;
1498 u8 *match_level;
1498 1499
1499 *match_level = MLX5_MATCH_NONE; 1500 match_level = outer_match_level;
1500 1501
1501 if (dissector->used_keys & 1502 if (dissector->used_keys &
1502 ~(BIT(FLOW_DISSECTOR_KEY_META) | 1503 ~(BIT(FLOW_DISSECTOR_KEY_META) |
@@ -1524,12 +1525,14 @@ static int __parse_cls_flower(struct mlx5e_priv *priv,
1524 } 1525 }
1525 1526
1526 if (mlx5e_get_tc_tun(filter_dev)) { 1527 if (mlx5e_get_tc_tun(filter_dev)) {
1527 if (parse_tunnel_attr(priv, spec, f, filter_dev, tunnel_match_level)) 1528 if (parse_tunnel_attr(priv, spec, f, filter_dev,
1529 outer_match_level))
1528 return -EOPNOTSUPP; 1530 return -EOPNOTSUPP;
1529 1531
1530 /* In decap flow, header pointers should point to the inner 1532 /* At this point, header pointers should point to the inner
1531 * headers, outer header were already set by parse_tunnel_attr 1533 * headers, outer header were already set by parse_tunnel_attr
1532 */ 1534 */
1535 match_level = inner_match_level;
1533 headers_c = get_match_headers_criteria(MLX5_FLOW_CONTEXT_ACTION_DECAP, 1536 headers_c = get_match_headers_criteria(MLX5_FLOW_CONTEXT_ACTION_DECAP,
1534 spec); 1537 spec);
1535 headers_v = get_match_headers_value(MLX5_FLOW_CONTEXT_ACTION_DECAP, 1538 headers_v = get_match_headers_value(MLX5_FLOW_CONTEXT_ACTION_DECAP,
@@ -1831,35 +1834,41 @@ static int parse_cls_flower(struct mlx5e_priv *priv,
1831 struct flow_cls_offload *f, 1834 struct flow_cls_offload *f,
1832 struct net_device *filter_dev) 1835 struct net_device *filter_dev)
1833{ 1836{
1837 u8 inner_match_level, outer_match_level, non_tunnel_match_level;
1834 struct netlink_ext_ack *extack = f->common.extack; 1838 struct netlink_ext_ack *extack = f->common.extack;
1835 struct mlx5_core_dev *dev = priv->mdev; 1839 struct mlx5_core_dev *dev = priv->mdev;
1836 struct mlx5_eswitch *esw = dev->priv.eswitch; 1840 struct mlx5_eswitch *esw = dev->priv.eswitch;
1837 struct mlx5e_rep_priv *rpriv = priv->ppriv; 1841 struct mlx5e_rep_priv *rpriv = priv->ppriv;
1838 u8 match_level, tunnel_match_level = MLX5_MATCH_NONE;
1839 struct mlx5_eswitch_rep *rep; 1842 struct mlx5_eswitch_rep *rep;
1840 int err; 1843 int err;
1841 1844
1842 err = __parse_cls_flower(priv, spec, f, filter_dev, &match_level, &tunnel_match_level); 1845 inner_match_level = MLX5_MATCH_NONE;
1846 outer_match_level = MLX5_MATCH_NONE;
1847
1848 err = __parse_cls_flower(priv, spec, f, filter_dev, &inner_match_level,
1849 &outer_match_level);
1850 non_tunnel_match_level = (inner_match_level == MLX5_MATCH_NONE) ?
1851 outer_match_level : inner_match_level;
1843 1852
1844 if (!err && (flow->flags & MLX5E_TC_FLOW_ESWITCH)) { 1853 if (!err && (flow->flags & MLX5E_TC_FLOW_ESWITCH)) {
1845 rep = rpriv->rep; 1854 rep = rpriv->rep;
1846 if (rep->vport != MLX5_VPORT_UPLINK && 1855 if (rep->vport != MLX5_VPORT_UPLINK &&
1847 (esw->offloads.inline_mode != MLX5_INLINE_MODE_NONE && 1856 (esw->offloads.inline_mode != MLX5_INLINE_MODE_NONE &&
1848 esw->offloads.inline_mode < match_level)) { 1857 esw->offloads.inline_mode < non_tunnel_match_level)) {
1849 NL_SET_ERR_MSG_MOD(extack, 1858 NL_SET_ERR_MSG_MOD(extack,
1850 "Flow is not offloaded due to min inline setting"); 1859 "Flow is not offloaded due to min inline setting");
1851 netdev_warn(priv->netdev, 1860 netdev_warn(priv->netdev,
1852 "Flow is not offloaded due to min inline setting, required %d actual %d\n", 1861 "Flow is not offloaded due to min inline setting, required %d actual %d\n",
1853 match_level, esw->offloads.inline_mode); 1862 non_tunnel_match_level, esw->offloads.inline_mode);
1854 return -EOPNOTSUPP; 1863 return -EOPNOTSUPP;
1855 } 1864 }
1856 } 1865 }
1857 1866
1858 if (flow->flags & MLX5E_TC_FLOW_ESWITCH) { 1867 if (flow->flags & MLX5E_TC_FLOW_ESWITCH) {
1859 flow->esw_attr->match_level = match_level; 1868 flow->esw_attr->inner_match_level = inner_match_level;
1860 flow->esw_attr->tunnel_match_level = tunnel_match_level; 1869 flow->esw_attr->outer_match_level = outer_match_level;
1861 } else { 1870 } else {
1862 flow->nic_attr->match_level = match_level; 1871 flow->nic_attr->match_level = non_tunnel_match_level;
1863 } 1872 }
1864 1873
1865 return err; 1874 return err;
@@ -3158,7 +3167,7 @@ mlx5e_flow_esw_attr_init(struct mlx5_esw_flow_attr *esw_attr,
3158 3167
3159 esw_attr->parse_attr = parse_attr; 3168 esw_attr->parse_attr = parse_attr;
3160 esw_attr->chain = f->common.chain_index; 3169 esw_attr->chain = f->common.chain_index;
3161 esw_attr->prio = TC_H_MAJ(f->common.prio) >> 16; 3170 esw_attr->prio = f->common.prio;
3162 3171
3163 esw_attr->in_rep = in_rep; 3172 esw_attr->in_rep = in_rep;
3164 esw_attr->in_mdev = in_mdev; 3173 esw_attr->in_mdev = in_mdev;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c
index c50b6f0769c8..49b06b256c92 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c
@@ -49,7 +49,7 @@ static inline bool mlx5e_channel_no_affinity_change(struct mlx5e_channel *c)
49static void mlx5e_handle_tx_dim(struct mlx5e_txqsq *sq) 49static void mlx5e_handle_tx_dim(struct mlx5e_txqsq *sq)
50{ 50{
51 struct mlx5e_sq_stats *stats = sq->stats; 51 struct mlx5e_sq_stats *stats = sq->stats;
52 struct dim_sample dim_sample; 52 struct dim_sample dim_sample = {};
53 53
54 if (unlikely(!test_bit(MLX5E_SQ_STATE_AM, &sq->state))) 54 if (unlikely(!test_bit(MLX5E_SQ_STATE_AM, &sq->state)))
55 return; 55 return;
@@ -61,7 +61,7 @@ static void mlx5e_handle_tx_dim(struct mlx5e_txqsq *sq)
61static void mlx5e_handle_rx_dim(struct mlx5e_rq *rq) 61static void mlx5e_handle_rx_dim(struct mlx5e_rq *rq)
62{ 62{
63 struct mlx5e_rq_stats *stats = rq->stats; 63 struct mlx5e_rq_stats *stats = rq->stats;
64 struct dim_sample dim_sample; 64 struct dim_sample dim_sample = {};
65 65
66 if (unlikely(!test_bit(MLX5E_RQ_STATE_AM, &rq->state))) 66 if (unlikely(!test_bit(MLX5E_RQ_STATE_AM, &rq->state)))
67 return; 67 return;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
index a38e8a3c7c9a..04685dbb280c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
@@ -377,8 +377,8 @@ struct mlx5_esw_flow_attr {
377 struct mlx5_termtbl_handle *termtbl; 377 struct mlx5_termtbl_handle *termtbl;
378 } dests[MLX5_MAX_FLOW_FWD_VPORTS]; 378 } dests[MLX5_MAX_FLOW_FWD_VPORTS];
379 u32 mod_hdr_id; 379 u32 mod_hdr_id;
380 u8 match_level; 380 u8 inner_match_level;
381 u8 tunnel_match_level; 381 u8 outer_match_level;
382 struct mlx5_fc *counter; 382 struct mlx5_fc *counter;
383 u32 chain; 383 u32 chain;
384 u16 prio; 384 u16 prio;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
index 089ae4d48a82..0323fd078271 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
@@ -207,14 +207,10 @@ mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
207 207
208 mlx5_eswitch_set_rule_source_port(esw, spec, attr); 208 mlx5_eswitch_set_rule_source_port(esw, spec, attr);
209 209
210 if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_DECAP) { 210 if (attr->outer_match_level != MLX5_MATCH_NONE)
211 if (attr->tunnel_match_level != MLX5_MATCH_NONE)
212 spec->match_criteria_enable |= MLX5_MATCH_OUTER_HEADERS;
213 if (attr->match_level != MLX5_MATCH_NONE)
214 spec->match_criteria_enable |= MLX5_MATCH_INNER_HEADERS;
215 } else if (attr->match_level != MLX5_MATCH_NONE) {
216 spec->match_criteria_enable |= MLX5_MATCH_OUTER_HEADERS; 211 spec->match_criteria_enable |= MLX5_MATCH_OUTER_HEADERS;
217 } 212 if (attr->inner_match_level != MLX5_MATCH_NONE)
213 spec->match_criteria_enable |= MLX5_MATCH_INNER_HEADERS;
218 214
219 if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) 215 if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
220 flow_act.modify_id = attr->mod_hdr_id; 216 flow_act.modify_id = attr->mod_hdr_id;
@@ -290,7 +286,7 @@ mlx5_eswitch_add_fwd_rule(struct mlx5_eswitch *esw,
290 mlx5_eswitch_set_rule_source_port(esw, spec, attr); 286 mlx5_eswitch_set_rule_source_port(esw, spec, attr);
291 287
292 spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS; 288 spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS;
293 if (attr->match_level != MLX5_MATCH_NONE) 289 if (attr->outer_match_level != MLX5_MATCH_NONE)
294 spec->match_criteria_enable |= MLX5_MATCH_OUTER_HEADERS; 290 spec->match_criteria_enable |= MLX5_MATCH_OUTER_HEADERS;
295 291
296 rule = mlx5_add_flow_rules(fast_fdb, spec, &flow_act, dest, i); 292 rule = mlx5_add_flow_rules(fast_fdb, spec, &flow_act, dest, i);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
index c48c382f926f..c1252d6be0ef 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
@@ -68,7 +68,7 @@ enum fs_flow_table_type {
68 FS_FT_SNIFFER_RX = 0X5, 68 FS_FT_SNIFFER_RX = 0X5,
69 FS_FT_SNIFFER_TX = 0X6, 69 FS_FT_SNIFFER_TX = 0X6,
70 FS_FT_RDMA_RX = 0X7, 70 FS_FT_RDMA_RX = 0X7,
71 FS_FT_MAX_TYPE = FS_FT_SNIFFER_TX, 71 FS_FT_MAX_TYPE = FS_FT_RDMA_RX,
72}; 72};
73 73
74enum fs_flow_table_op_mod { 74enum fs_flow_table_op_mod {
@@ -275,7 +275,8 @@ void mlx5_cleanup_fs(struct mlx5_core_dev *dev);
275 (type == FS_FT_FDB) ? MLX5_CAP_ESW_FLOWTABLE_FDB(mdev, cap) : \ 275 (type == FS_FT_FDB) ? MLX5_CAP_ESW_FLOWTABLE_FDB(mdev, cap) : \
276 (type == FS_FT_SNIFFER_RX) ? MLX5_CAP_FLOWTABLE_SNIFFER_RX(mdev, cap) : \ 276 (type == FS_FT_SNIFFER_RX) ? MLX5_CAP_FLOWTABLE_SNIFFER_RX(mdev, cap) : \
277 (type == FS_FT_SNIFFER_TX) ? MLX5_CAP_FLOWTABLE_SNIFFER_TX(mdev, cap) : \ 277 (type == FS_FT_SNIFFER_TX) ? MLX5_CAP_FLOWTABLE_SNIFFER_TX(mdev, cap) : \
278 (BUILD_BUG_ON_ZERO(FS_FT_SNIFFER_TX != FS_FT_MAX_TYPE))\ 278 (type == FS_FT_RDMA_RX) ? MLX5_CAP_FLOWTABLE_RDMA_RX(mdev, cap) : \
279 (BUILD_BUG_ON_ZERO(FS_FT_RDMA_RX != FS_FT_MAX_TYPE))\
279 ) 280 )
280 281
281#endif 282#endif
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c
index b3762123a69c..1834d9f3aa1c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c
@@ -369,6 +369,11 @@ int mlx5_fc_query(struct mlx5_core_dev *dev, struct mlx5_fc *counter,
369} 369}
370EXPORT_SYMBOL(mlx5_fc_query); 370EXPORT_SYMBOL(mlx5_fc_query);
371 371
372u64 mlx5_fc_query_lastuse(struct mlx5_fc *counter)
373{
374 return counter->cache.lastuse;
375}
376
372void mlx5_fc_query_cached(struct mlx5_fc *counter, 377void mlx5_fc_query_cached(struct mlx5_fc *counter,
373 u64 *bytes, u64 *packets, u64 *lastuse) 378 u64 *bytes, u64 *packets, u64 *lastuse)
374{ 379{
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/health.c b/drivers/net/ethernet/mellanox/mlx5/core/health.c
index 9314777d99e3..d685122d9ff7 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/health.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/health.c
@@ -590,7 +590,8 @@ mlx5_fw_fatal_reporter_dump(struct devlink_health_reporter *reporter,
590 data_size = crdump_size - offset; 590 data_size = crdump_size - offset;
591 else 591 else
592 data_size = MLX5_CR_DUMP_CHUNK_SIZE; 592 data_size = MLX5_CR_DUMP_CHUNK_SIZE;
593 err = devlink_fmsg_binary_put(fmsg, cr_data, data_size); 593 err = devlink_fmsg_binary_put(fmsg, (char *)cr_data + offset,
594 data_size);
594 if (err) 595 if (err)
595 goto free_data; 596 goto free_data;
596 } 597 }
@@ -700,6 +701,16 @@ static void poll_health(struct timer_list *t)
700 if (dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) 701 if (dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR)
701 goto out; 702 goto out;
702 703
704 fatal_error = check_fatal_sensors(dev);
705
706 if (fatal_error && !health->fatal_error) {
707 mlx5_core_err(dev, "Fatal error %u detected\n", fatal_error);
708 dev->priv.health.fatal_error = fatal_error;
709 print_health_info(dev);
710 mlx5_trigger_health_work(dev);
711 goto out;
712 }
713
703 count = ioread32be(health->health_counter); 714 count = ioread32be(health->health_counter);
704 if (count == health->prev) 715 if (count == health->prev)
705 ++health->miss_counter; 716 ++health->miss_counter;
@@ -718,15 +729,6 @@ static void poll_health(struct timer_list *t)
718 if (health->synd && health->synd != prev_synd) 729 if (health->synd && health->synd != prev_synd)
719 queue_work(health->wq, &health->report_work); 730 queue_work(health->wq, &health->report_work);
720 731
721 fatal_error = check_fatal_sensors(dev);
722
723 if (fatal_error && !health->fatal_error) {
724 mlx5_core_err(dev, "Fatal error %u detected\n", fatal_error);
725 dev->priv.health.fatal_error = fatal_error;
726 print_health_info(dev);
727 mlx5_trigger_health_work(dev);
728 }
729
730out: 732out:
731 mod_timer(&health->timer, get_next_poll_jiffies()); 733 mod_timer(&health->timer, get_next_poll_jiffies());
732} 734}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c
index ebd81f6b556e..90cb50fe17fd 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c
@@ -122,6 +122,14 @@ static int mlx5i_get_ts_info(struct net_device *netdev,
122 return mlx5e_ethtool_get_ts_info(priv, info); 122 return mlx5e_ethtool_get_ts_info(priv, info);
123} 123}
124 124
125static int mlx5i_flash_device(struct net_device *netdev,
126 struct ethtool_flash *flash)
127{
128 struct mlx5e_priv *priv = mlx5i_epriv(netdev);
129
130 return mlx5e_ethtool_flash_device(priv, flash);
131}
132
125enum mlx5_ptys_width { 133enum mlx5_ptys_width {
126 MLX5_PTYS_WIDTH_1X = 1 << 0, 134 MLX5_PTYS_WIDTH_1X = 1 << 0,
127 MLX5_PTYS_WIDTH_2X = 1 << 1, 135 MLX5_PTYS_WIDTH_2X = 1 << 1,
@@ -233,6 +241,7 @@ const struct ethtool_ops mlx5i_ethtool_ops = {
233 .get_ethtool_stats = mlx5i_get_ethtool_stats, 241 .get_ethtool_stats = mlx5i_get_ethtool_stats,
234 .get_ringparam = mlx5i_get_ringparam, 242 .get_ringparam = mlx5i_get_ringparam,
235 .set_ringparam = mlx5i_set_ringparam, 243 .set_ringparam = mlx5i_set_ringparam,
244 .flash_device = mlx5i_flash_device,
236 .get_channels = mlx5i_get_channels, 245 .get_channels = mlx5i_get_channels,
237 .set_channels = mlx5i_set_channels, 246 .set_channels = mlx5i_set_channels,
238 .get_coalesce = mlx5i_get_coalesce, 247 .get_coalesce = mlx5i_get_coalesce,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
index 6bfaaab362dc..1a2560e3bf7c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
@@ -88,8 +88,7 @@ int mlx5i_init(struct mlx5_core_dev *mdev,
88 netdev->mtu = netdev->max_mtu; 88 netdev->mtu = netdev->max_mtu;
89 89
90 mlx5e_build_nic_params(mdev, NULL, &priv->rss_params, &priv->channels.params, 90 mlx5e_build_nic_params(mdev, NULL, &priv->rss_params, &priv->channels.params,
91 mlx5e_get_netdev_max_channels(netdev), 91 priv->max_nch, netdev->mtu);
92 netdev->mtu);
93 mlx5i_build_nic_params(mdev, &priv->channels.params); 92 mlx5i_build_nic_params(mdev, &priv->channels.params);
94 93
95 mlx5e_timestamp_init(priv); 94 mlx5e_timestamp_init(priv);
@@ -118,11 +117,10 @@ void mlx5i_cleanup(struct mlx5e_priv *priv)
118 117
119static void mlx5i_grp_sw_update_stats(struct mlx5e_priv *priv) 118static void mlx5i_grp_sw_update_stats(struct mlx5e_priv *priv)
120{ 119{
121 int max_nch = mlx5e_get_netdev_max_channels(priv->netdev);
122 struct mlx5e_sw_stats s = { 0 }; 120 struct mlx5e_sw_stats s = { 0 };
123 int i, j; 121 int i, j;
124 122
125 for (i = 0; i < max_nch; i++) { 123 for (i = 0; i < priv->max_nch; i++) {
126 struct mlx5e_channel_stats *channel_stats; 124 struct mlx5e_channel_stats *channel_stats;
127 struct mlx5e_rq_stats *rq_stats; 125 struct mlx5e_rq_stats *rq_stats;
128 126
@@ -436,6 +434,7 @@ static const struct mlx5e_profile mlx5i_nic_profile = {
436 .rx_handlers.handle_rx_cqe = mlx5i_handle_rx_cqe, 434 .rx_handlers.handle_rx_cqe = mlx5i_handle_rx_cqe,
437 .rx_handlers.handle_rx_cqe_mpwqe = NULL, /* Not supported */ 435 .rx_handlers.handle_rx_cqe_mpwqe = NULL, /* Not supported */
438 .max_tc = MLX5I_MAX_NUM_TC, 436 .max_tc = MLX5I_MAX_NUM_TC,
437 .rq_groups = MLX5E_NUM_RQ_GROUPS(REGULAR),
439}; 438};
440 439
441/* mlx5i netdev NDos */ 440/* mlx5i netdev NDos */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c
index 6e56fa769d2e..c5a491e22e55 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c
@@ -355,6 +355,7 @@ static const struct mlx5e_profile mlx5i_pkey_nic_profile = {
355 .rx_handlers.handle_rx_cqe = mlx5i_handle_rx_cqe, 355 .rx_handlers.handle_rx_cqe = mlx5i_handle_rx_cqe,
356 .rx_handlers.handle_rx_cqe_mpwqe = NULL, /* Not supported */ 356 .rx_handlers.handle_rx_cqe_mpwqe = NULL, /* Not supported */
357 .max_tc = MLX5I_MAX_NUM_TC, 357 .max_tc = MLX5I_MAX_NUM_TC,
358 .rq_groups = MLX5E_NUM_RQ_GROUPS(REGULAR),
358}; 359};
359 360
360const struct mlx5e_profile *mlx5i_pkey_get_profile(void) 361const struct mlx5e_profile *mlx5i_pkey_get_profile(void)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/crypto.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/crypto.c
index ea9ee88491e5..ea1d4d26ece0 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/crypto.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/crypto.c
@@ -27,6 +27,7 @@ int mlx5_create_encryption_key(struct mlx5_core_dev *mdev,
27 case 128: 27 case 128:
28 general_obj_key_size = 28 general_obj_key_size =
29 MLX5_GENERAL_OBJECT_TYPE_ENCRYPTION_KEY_KEY_SIZE_128; 29 MLX5_GENERAL_OBJECT_TYPE_ENCRYPTION_KEY_KEY_SIZE_128;
30 key_p += sz_bytes;
30 break; 31 break;
31 case 256: 32 case 256:
32 general_obj_key_size = 33 general_obj_key_size =
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
index 4d34d42b3b0e..eda9c23e87b2 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
@@ -1604,14 +1604,14 @@ mlxsw_sp_setup_tc_block_flower_bind(struct mlxsw_sp_port *mlxsw_sp_port,
1604 bool register_block = false; 1604 bool register_block = false;
1605 int err; 1605 int err;
1606 1606
1607 block_cb = flow_block_cb_lookup(f, mlxsw_sp_setup_tc_block_cb_flower, 1607 block_cb = flow_block_cb_lookup(f->block,
1608 mlxsw_sp_setup_tc_block_cb_flower,
1608 mlxsw_sp); 1609 mlxsw_sp);
1609 if (!block_cb) { 1610 if (!block_cb) {
1610 acl_block = mlxsw_sp_acl_block_create(mlxsw_sp, f->net); 1611 acl_block = mlxsw_sp_acl_block_create(mlxsw_sp, f->net);
1611 if (!acl_block) 1612 if (!acl_block)
1612 return -ENOMEM; 1613 return -ENOMEM;
1613 block_cb = flow_block_cb_alloc(f->net, 1614 block_cb = flow_block_cb_alloc(mlxsw_sp_setup_tc_block_cb_flower,
1614 mlxsw_sp_setup_tc_block_cb_flower,
1615 mlxsw_sp, acl_block, 1615 mlxsw_sp, acl_block,
1616 mlxsw_sp_tc_block_flower_release); 1616 mlxsw_sp_tc_block_flower_release);
1617 if (IS_ERR(block_cb)) { 1617 if (IS_ERR(block_cb)) {
@@ -1657,7 +1657,8 @@ mlxsw_sp_setup_tc_block_flower_unbind(struct mlxsw_sp_port *mlxsw_sp_port,
1657 struct flow_block_cb *block_cb; 1657 struct flow_block_cb *block_cb;
1658 int err; 1658 int err;
1659 1659
1660 block_cb = flow_block_cb_lookup(f, mlxsw_sp_setup_tc_block_cb_flower, 1660 block_cb = flow_block_cb_lookup(f->block,
1661 mlxsw_sp_setup_tc_block_cb_flower,
1661 mlxsw_sp); 1662 mlxsw_sp);
1662 if (!block_cb) 1663 if (!block_cb)
1663 return; 1664 return;
@@ -1680,7 +1681,7 @@ static int mlxsw_sp_setup_tc_block(struct mlxsw_sp_port *mlxsw_sp_port,
1680 struct flow_block_offload *f) 1681 struct flow_block_offload *f)
1681{ 1682{
1682 struct flow_block_cb *block_cb; 1683 struct flow_block_cb *block_cb;
1683 tc_setup_cb_t *cb; 1684 flow_setup_cb_t *cb;
1684 bool ingress; 1685 bool ingress;
1685 int err; 1686 int err;
1686 1687
@@ -1702,7 +1703,7 @@ static int mlxsw_sp_setup_tc_block(struct mlxsw_sp_port *mlxsw_sp_port,
1702 &mlxsw_sp_block_cb_list)) 1703 &mlxsw_sp_block_cb_list))
1703 return -EBUSY; 1704 return -EBUSY;
1704 1705
1705 block_cb = flow_block_cb_alloc(f->net, cb, mlxsw_sp_port, 1706 block_cb = flow_block_cb_alloc(cb, mlxsw_sp_port,
1706 mlxsw_sp_port, NULL); 1707 mlxsw_sp_port, NULL);
1707 if (IS_ERR(block_cb)) 1708 if (IS_ERR(block_cb))
1708 return PTR_ERR(block_cb); 1709 return PTR_ERR(block_cb);
@@ -1718,7 +1719,7 @@ static int mlxsw_sp_setup_tc_block(struct mlxsw_sp_port *mlxsw_sp_port,
1718 case FLOW_BLOCK_UNBIND: 1719 case FLOW_BLOCK_UNBIND:
1719 mlxsw_sp_setup_tc_block_flower_unbind(mlxsw_sp_port, 1720 mlxsw_sp_setup_tc_block_flower_unbind(mlxsw_sp_port,
1720 f, ingress); 1721 f, ingress);
1721 block_cb = flow_block_cb_lookup(f, cb, mlxsw_sp_port); 1722 block_cb = flow_block_cb_lookup(f->block, cb, mlxsw_sp_port);
1722 if (!block_cb) 1723 if (!block_cb)
1723 return -ENOENT; 1724 return -ENOENT;
1724 1725
@@ -6329,7 +6330,7 @@ static int __init mlxsw_sp_module_init(void)
6329 return 0; 6330 return 0;
6330 6331
6331err_sp2_pci_driver_register: 6332err_sp2_pci_driver_register:
6332 mlxsw_pci_driver_unregister(&mlxsw_sp2_pci_driver); 6333 mlxsw_pci_driver_unregister(&mlxsw_sp1_pci_driver);
6333err_sp1_pci_driver_register: 6334err_sp1_pci_driver_register:
6334 mlxsw_core_driver_unregister(&mlxsw_sp2_driver); 6335 mlxsw_core_driver_unregister(&mlxsw_sp2_driver);
6335err_sp2_core_driver_register: 6336err_sp2_core_driver_register:
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
index 131f62ce9297..6664119fb0c8 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
@@ -951,4 +951,8 @@ void mlxsw_sp_port_nve_fini(struct mlxsw_sp_port *mlxsw_sp_port);
951int mlxsw_sp_nve_init(struct mlxsw_sp *mlxsw_sp); 951int mlxsw_sp_nve_init(struct mlxsw_sp *mlxsw_sp);
952void mlxsw_sp_nve_fini(struct mlxsw_sp *mlxsw_sp); 952void mlxsw_sp_nve_fini(struct mlxsw_sp *mlxsw_sp);
953 953
954/* spectrum_nve_vxlan.c */
955int mlxsw_sp_nve_inc_parsing_depth_get(struct mlxsw_sp *mlxsw_sp);
956void mlxsw_sp_nve_inc_parsing_depth_put(struct mlxsw_sp *mlxsw_sp);
957
954#endif 958#endif
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c
index e8ac90564dbe..84a87d059333 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c
@@ -471,7 +471,7 @@ int mlxsw_sp_acl_rulei_commit(struct mlxsw_sp_acl_rule_info *rulei)
471void mlxsw_sp_acl_rulei_priority(struct mlxsw_sp_acl_rule_info *rulei, 471void mlxsw_sp_acl_rulei_priority(struct mlxsw_sp_acl_rule_info *rulei,
472 unsigned int priority) 472 unsigned int priority)
473{ 473{
474 rulei->priority = priority >> 16; 474 rulei->priority = priority;
475} 475}
476 476
477void mlxsw_sp_acl_rulei_keymask_u32(struct mlxsw_sp_acl_rule_info *rulei, 477void mlxsw_sp_acl_rulei_keymask_u32(struct mlxsw_sp_acl_rule_info *rulei,
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c
index 1537f70bc26d..888ba4300bcc 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c
@@ -437,8 +437,8 @@ static const struct mlxsw_sp_sb_pr mlxsw_sp1_sb_prs[] = {
437 MLXSW_SP1_SB_PR_CPU_SIZE, true, false), 437 MLXSW_SP1_SB_PR_CPU_SIZE, true, false),
438}; 438};
439 439
440#define MLXSW_SP2_SB_PR_INGRESS_SIZE 38128752 440#define MLXSW_SP2_SB_PR_INGRESS_SIZE 35297568
441#define MLXSW_SP2_SB_PR_EGRESS_SIZE 38128752 441#define MLXSW_SP2_SB_PR_EGRESS_SIZE 35297568
442#define MLXSW_SP2_SB_PR_CPU_SIZE (256 * 1000) 442#define MLXSW_SP2_SB_PR_CPU_SIZE (256 * 1000)
443 443
444/* Order according to mlxsw_sp2_sb_pool_dess */ 444/* Order according to mlxsw_sp2_sb_pool_dess */
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve.c
index 1df164a4b06d..17f334b46c40 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve.c
@@ -775,6 +775,7 @@ static void mlxsw_sp_nve_tunnel_fini(struct mlxsw_sp *mlxsw_sp)
775 ops->fini(nve); 775 ops->fini(nve);
776 mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ, 1, 776 mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ, 1,
777 nve->tunnel_index); 777 nve->tunnel_index);
778 memset(&nve->config, 0, sizeof(nve->config));
778 } 779 }
779 nve->num_nve_tunnels--; 780 nve->num_nve_tunnels--;
780} 781}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve.h
index 0035640156a1..12f664f42f21 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve.h
@@ -29,6 +29,7 @@ struct mlxsw_sp_nve {
29 unsigned int num_max_mc_entries[MLXSW_SP_L3_PROTO_MAX]; 29 unsigned int num_max_mc_entries[MLXSW_SP_L3_PROTO_MAX];
30 u32 tunnel_index; 30 u32 tunnel_index;
31 u16 ul_rif_index; /* Reserved for Spectrum */ 31 u16 ul_rif_index; /* Reserved for Spectrum */
32 unsigned int inc_parsing_depth_refs;
32}; 33};
33 34
34struct mlxsw_sp_nve_ops { 35struct mlxsw_sp_nve_ops {
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve_vxlan.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve_vxlan.c
index 93ccd9fc2266..05517c7feaa5 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve_vxlan.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve_vxlan.c
@@ -103,9 +103,9 @@ static void mlxsw_sp_nve_vxlan_config(const struct mlxsw_sp_nve *nve,
103 config->udp_dport = cfg->dst_port; 103 config->udp_dport = cfg->dst_port;
104} 104}
105 105
106static int mlxsw_sp_nve_parsing_set(struct mlxsw_sp *mlxsw_sp, 106static int __mlxsw_sp_nve_parsing_set(struct mlxsw_sp *mlxsw_sp,
107 unsigned int parsing_depth, 107 unsigned int parsing_depth,
108 __be16 udp_dport) 108 __be16 udp_dport)
109{ 109{
110 char mprs_pl[MLXSW_REG_MPRS_LEN]; 110 char mprs_pl[MLXSW_REG_MPRS_LEN];
111 111
@@ -113,6 +113,56 @@ static int mlxsw_sp_nve_parsing_set(struct mlxsw_sp *mlxsw_sp,
113 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mprs), mprs_pl); 113 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mprs), mprs_pl);
114} 114}
115 115
116static int mlxsw_sp_nve_parsing_set(struct mlxsw_sp *mlxsw_sp,
117 __be16 udp_dport)
118{
119 int parsing_depth = mlxsw_sp->nve->inc_parsing_depth_refs ?
120 MLXSW_SP_NVE_VXLAN_PARSING_DEPTH :
121 MLXSW_SP_NVE_DEFAULT_PARSING_DEPTH;
122
123 return __mlxsw_sp_nve_parsing_set(mlxsw_sp, parsing_depth, udp_dport);
124}
125
126static int
127__mlxsw_sp_nve_inc_parsing_depth_get(struct mlxsw_sp *mlxsw_sp,
128 __be16 udp_dport)
129{
130 int err;
131
132 mlxsw_sp->nve->inc_parsing_depth_refs++;
133
134 err = mlxsw_sp_nve_parsing_set(mlxsw_sp, udp_dport);
135 if (err)
136 goto err_nve_parsing_set;
137 return 0;
138
139err_nve_parsing_set:
140 mlxsw_sp->nve->inc_parsing_depth_refs--;
141 return err;
142}
143
144static void
145__mlxsw_sp_nve_inc_parsing_depth_put(struct mlxsw_sp *mlxsw_sp,
146 __be16 udp_dport)
147{
148 mlxsw_sp->nve->inc_parsing_depth_refs--;
149 mlxsw_sp_nve_parsing_set(mlxsw_sp, udp_dport);
150}
151
152int mlxsw_sp_nve_inc_parsing_depth_get(struct mlxsw_sp *mlxsw_sp)
153{
154 __be16 udp_dport = mlxsw_sp->nve->config.udp_dport;
155
156 return __mlxsw_sp_nve_inc_parsing_depth_get(mlxsw_sp, udp_dport);
157}
158
159void mlxsw_sp_nve_inc_parsing_depth_put(struct mlxsw_sp *mlxsw_sp)
160{
161 __be16 udp_dport = mlxsw_sp->nve->config.udp_dport;
162
163 __mlxsw_sp_nve_inc_parsing_depth_put(mlxsw_sp, udp_dport);
164}
165
116static void 166static void
117mlxsw_sp_nve_vxlan_config_prepare(char *tngcr_pl, 167mlxsw_sp_nve_vxlan_config_prepare(char *tngcr_pl,
118 const struct mlxsw_sp_nve_config *config) 168 const struct mlxsw_sp_nve_config *config)
@@ -176,9 +226,7 @@ static int mlxsw_sp1_nve_vxlan_init(struct mlxsw_sp_nve *nve,
176 struct mlxsw_sp *mlxsw_sp = nve->mlxsw_sp; 226 struct mlxsw_sp *mlxsw_sp = nve->mlxsw_sp;
177 int err; 227 int err;
178 228
179 err = mlxsw_sp_nve_parsing_set(mlxsw_sp, 229 err = __mlxsw_sp_nve_inc_parsing_depth_get(mlxsw_sp, config->udp_dport);
180 MLXSW_SP_NVE_VXLAN_PARSING_DEPTH,
181 config->udp_dport);
182 if (err) 230 if (err)
183 return err; 231 return err;
184 232
@@ -203,8 +251,7 @@ err_promote_decap:
203err_rtdp_set: 251err_rtdp_set:
204 mlxsw_sp1_nve_vxlan_config_clear(mlxsw_sp); 252 mlxsw_sp1_nve_vxlan_config_clear(mlxsw_sp);
205err_config_set: 253err_config_set:
206 mlxsw_sp_nve_parsing_set(mlxsw_sp, MLXSW_SP_NVE_DEFAULT_PARSING_DEPTH, 254 __mlxsw_sp_nve_inc_parsing_depth_put(mlxsw_sp, 0);
207 config->udp_dport);
208 return err; 255 return err;
209} 256}
210 257
@@ -216,8 +263,7 @@ static void mlxsw_sp1_nve_vxlan_fini(struct mlxsw_sp_nve *nve)
216 mlxsw_sp_router_nve_demote_decap(mlxsw_sp, config->ul_tb_id, 263 mlxsw_sp_router_nve_demote_decap(mlxsw_sp, config->ul_tb_id,
217 config->ul_proto, &config->ul_sip); 264 config->ul_proto, &config->ul_sip);
218 mlxsw_sp1_nve_vxlan_config_clear(mlxsw_sp); 265 mlxsw_sp1_nve_vxlan_config_clear(mlxsw_sp);
219 mlxsw_sp_nve_parsing_set(mlxsw_sp, MLXSW_SP_NVE_DEFAULT_PARSING_DEPTH, 266 __mlxsw_sp_nve_inc_parsing_depth_put(mlxsw_sp, 0);
220 config->udp_dport);
221} 267}
222 268
223static int 269static int
@@ -320,9 +366,7 @@ static int mlxsw_sp2_nve_vxlan_init(struct mlxsw_sp_nve *nve,
320 struct mlxsw_sp *mlxsw_sp = nve->mlxsw_sp; 366 struct mlxsw_sp *mlxsw_sp = nve->mlxsw_sp;
321 int err; 367 int err;
322 368
323 err = mlxsw_sp_nve_parsing_set(mlxsw_sp, 369 err = __mlxsw_sp_nve_inc_parsing_depth_get(mlxsw_sp, config->udp_dport);
324 MLXSW_SP_NVE_VXLAN_PARSING_DEPTH,
325 config->udp_dport);
326 if (err) 370 if (err)
327 return err; 371 return err;
328 372
@@ -348,8 +392,7 @@ err_promote_decap:
348err_rtdp_set: 392err_rtdp_set:
349 mlxsw_sp2_nve_vxlan_config_clear(mlxsw_sp); 393 mlxsw_sp2_nve_vxlan_config_clear(mlxsw_sp);
350err_config_set: 394err_config_set:
351 mlxsw_sp_nve_parsing_set(mlxsw_sp, MLXSW_SP_NVE_DEFAULT_PARSING_DEPTH, 395 __mlxsw_sp_nve_inc_parsing_depth_put(mlxsw_sp, 0);
352 config->udp_dport);
353 return err; 396 return err;
354} 397}
355 398
@@ -361,8 +404,7 @@ static void mlxsw_sp2_nve_vxlan_fini(struct mlxsw_sp_nve *nve)
361 mlxsw_sp_router_nve_demote_decap(mlxsw_sp, config->ul_tb_id, 404 mlxsw_sp_router_nve_demote_decap(mlxsw_sp, config->ul_tb_id,
362 config->ul_proto, &config->ul_sip); 405 config->ul_proto, &config->ul_sip);
363 mlxsw_sp2_nve_vxlan_config_clear(mlxsw_sp); 406 mlxsw_sp2_nve_vxlan_config_clear(mlxsw_sp);
364 mlxsw_sp_nve_parsing_set(mlxsw_sp, MLXSW_SP_NVE_DEFAULT_PARSING_DEPTH, 407 __mlxsw_sp_nve_inc_parsing_depth_put(mlxsw_sp, 0);
365 config->udp_dport);
366} 408}
367 409
368const struct mlxsw_sp_nve_ops mlxsw_sp2_nve_vxlan_ops = { 410const struct mlxsw_sp_nve_ops mlxsw_sp2_nve_vxlan_ops = {
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.c
index bd9c2bc2d5d6..38bb1cfe4e8c 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.c
@@ -29,7 +29,7 @@
29 29
30struct mlxsw_sp_ptp_state { 30struct mlxsw_sp_ptp_state {
31 struct mlxsw_sp *mlxsw_sp; 31 struct mlxsw_sp *mlxsw_sp;
32 struct rhashtable unmatched_ht; 32 struct rhltable unmatched_ht;
33 spinlock_t unmatched_lock; /* protects the HT */ 33 spinlock_t unmatched_lock; /* protects the HT */
34 struct delayed_work ht_gc_dw; 34 struct delayed_work ht_gc_dw;
35 u32 gc_cycle; 35 u32 gc_cycle;
@@ -45,7 +45,7 @@ struct mlxsw_sp1_ptp_key {
45 45
46struct mlxsw_sp1_ptp_unmatched { 46struct mlxsw_sp1_ptp_unmatched {
47 struct mlxsw_sp1_ptp_key key; 47 struct mlxsw_sp1_ptp_key key;
48 struct rhash_head ht_node; 48 struct rhlist_head ht_node;
49 struct rcu_head rcu; 49 struct rcu_head rcu;
50 struct sk_buff *skb; 50 struct sk_buff *skb;
51 u64 timestamp; 51 u64 timestamp;
@@ -359,7 +359,7 @@ static int mlxsw_sp_ptp_parse(struct sk_buff *skb,
359/* Returns NULL on successful insertion, a pointer on conflict, or an ERR_PTR on 359/* Returns NULL on successful insertion, a pointer on conflict, or an ERR_PTR on
360 * error. 360 * error.
361 */ 361 */
362static struct mlxsw_sp1_ptp_unmatched * 362static int
363mlxsw_sp1_ptp_unmatched_save(struct mlxsw_sp *mlxsw_sp, 363mlxsw_sp1_ptp_unmatched_save(struct mlxsw_sp *mlxsw_sp,
364 struct mlxsw_sp1_ptp_key key, 364 struct mlxsw_sp1_ptp_key key,
365 struct sk_buff *skb, 365 struct sk_buff *skb,
@@ -368,41 +368,51 @@ mlxsw_sp1_ptp_unmatched_save(struct mlxsw_sp *mlxsw_sp,
368 int cycles = MLXSW_SP1_PTP_HT_GC_TIMEOUT / MLXSW_SP1_PTP_HT_GC_INTERVAL; 368 int cycles = MLXSW_SP1_PTP_HT_GC_TIMEOUT / MLXSW_SP1_PTP_HT_GC_INTERVAL;
369 struct mlxsw_sp_ptp_state *ptp_state = mlxsw_sp->ptp_state; 369 struct mlxsw_sp_ptp_state *ptp_state = mlxsw_sp->ptp_state;
370 struct mlxsw_sp1_ptp_unmatched *unmatched; 370 struct mlxsw_sp1_ptp_unmatched *unmatched;
371 struct mlxsw_sp1_ptp_unmatched *conflict; 371 int err;
372 372
373 unmatched = kzalloc(sizeof(*unmatched), GFP_ATOMIC); 373 unmatched = kzalloc(sizeof(*unmatched), GFP_ATOMIC);
374 if (!unmatched) 374 if (!unmatched)
375 return ERR_PTR(-ENOMEM); 375 return -ENOMEM;
376 376
377 unmatched->key = key; 377 unmatched->key = key;
378 unmatched->skb = skb; 378 unmatched->skb = skb;
379 unmatched->timestamp = timestamp; 379 unmatched->timestamp = timestamp;
380 unmatched->gc_cycle = mlxsw_sp->ptp_state->gc_cycle + cycles; 380 unmatched->gc_cycle = mlxsw_sp->ptp_state->gc_cycle + cycles;
381 381
382 conflict = rhashtable_lookup_get_insert_fast(&ptp_state->unmatched_ht, 382 err = rhltable_insert(&ptp_state->unmatched_ht, &unmatched->ht_node,
383 &unmatched->ht_node, 383 mlxsw_sp1_ptp_unmatched_ht_params);
384 mlxsw_sp1_ptp_unmatched_ht_params); 384 if (err)
385 if (conflict)
386 kfree(unmatched); 385 kfree(unmatched);
387 386
388 return conflict; 387 return err;
389} 388}
390 389
391static struct mlxsw_sp1_ptp_unmatched * 390static struct mlxsw_sp1_ptp_unmatched *
392mlxsw_sp1_ptp_unmatched_lookup(struct mlxsw_sp *mlxsw_sp, 391mlxsw_sp1_ptp_unmatched_lookup(struct mlxsw_sp *mlxsw_sp,
393 struct mlxsw_sp1_ptp_key key) 392 struct mlxsw_sp1_ptp_key key, int *p_length)
394{ 393{
395 return rhashtable_lookup(&mlxsw_sp->ptp_state->unmatched_ht, &key, 394 struct mlxsw_sp1_ptp_unmatched *unmatched, *last = NULL;
396 mlxsw_sp1_ptp_unmatched_ht_params); 395 struct rhlist_head *tmp, *list;
396 int length = 0;
397
398 list = rhltable_lookup(&mlxsw_sp->ptp_state->unmatched_ht, &key,
399 mlxsw_sp1_ptp_unmatched_ht_params);
400 rhl_for_each_entry_rcu(unmatched, tmp, list, ht_node) {
401 last = unmatched;
402 length++;
403 }
404
405 *p_length = length;
406 return last;
397} 407}
398 408
399static int 409static int
400mlxsw_sp1_ptp_unmatched_remove(struct mlxsw_sp *mlxsw_sp, 410mlxsw_sp1_ptp_unmatched_remove(struct mlxsw_sp *mlxsw_sp,
401 struct mlxsw_sp1_ptp_unmatched *unmatched) 411 struct mlxsw_sp1_ptp_unmatched *unmatched)
402{ 412{
403 return rhashtable_remove_fast(&mlxsw_sp->ptp_state->unmatched_ht, 413 return rhltable_remove(&mlxsw_sp->ptp_state->unmatched_ht,
404 &unmatched->ht_node, 414 &unmatched->ht_node,
405 mlxsw_sp1_ptp_unmatched_ht_params); 415 mlxsw_sp1_ptp_unmatched_ht_params);
406} 416}
407 417
408/* This function is called in the following scenarios: 418/* This function is called in the following scenarios:
@@ -489,75 +499,38 @@ static void mlxsw_sp1_ptp_got_piece(struct mlxsw_sp *mlxsw_sp,
489 struct mlxsw_sp1_ptp_key key, 499 struct mlxsw_sp1_ptp_key key,
490 struct sk_buff *skb, u64 timestamp) 500 struct sk_buff *skb, u64 timestamp)
491{ 501{
492 struct mlxsw_sp1_ptp_unmatched *unmatched, *conflict; 502 struct mlxsw_sp1_ptp_unmatched *unmatched;
503 int length;
493 int err; 504 int err;
494 505
495 rcu_read_lock(); 506 rcu_read_lock();
496 507
497 unmatched = mlxsw_sp1_ptp_unmatched_lookup(mlxsw_sp, key);
498
499 spin_lock(&mlxsw_sp->ptp_state->unmatched_lock); 508 spin_lock(&mlxsw_sp->ptp_state->unmatched_lock);
500 509
501 if (unmatched) { 510 unmatched = mlxsw_sp1_ptp_unmatched_lookup(mlxsw_sp, key, &length);
502 /* There was an unmatched entry when we looked, but it may have
503 * been removed before we took the lock.
504 */
505 err = mlxsw_sp1_ptp_unmatched_remove(mlxsw_sp, unmatched);
506 if (err)
507 unmatched = NULL;
508 }
509
510 if (!unmatched) {
511 /* We have no unmatched entry, but one may have been added after
512 * we looked, but before we took the lock.
513 */
514 unmatched = mlxsw_sp1_ptp_unmatched_save(mlxsw_sp, key,
515 skb, timestamp);
516 if (IS_ERR(unmatched)) {
517 if (skb)
518 mlxsw_sp1_ptp_packet_finish(mlxsw_sp, skb,
519 key.local_port,
520 key.ingress, NULL);
521 unmatched = NULL;
522 } else if (unmatched) {
523 /* Save just told us, under lock, that the entry is
524 * there, so this has to work.
525 */
526 err = mlxsw_sp1_ptp_unmatched_remove(mlxsw_sp,
527 unmatched);
528 WARN_ON_ONCE(err);
529 }
530 }
531
532 /* If unmatched is non-NULL here, it comes either from the lookup, or
533 * from the save attempt above. In either case the entry was removed
534 * from the hash table. If unmatched is NULL, a new unmatched entry was
535 * added to the hash table, and there was no conflict.
536 */
537
538 if (skb && unmatched && unmatched->timestamp) { 511 if (skb && unmatched && unmatched->timestamp) {
539 unmatched->skb = skb; 512 unmatched->skb = skb;
540 } else if (timestamp && unmatched && unmatched->skb) { 513 } else if (timestamp && unmatched && unmatched->skb) {
541 unmatched->timestamp = timestamp; 514 unmatched->timestamp = timestamp;
542 } else if (unmatched) { 515 } else {
543 /* unmatched holds an older entry of the same type: either an 516 /* Either there is no entry to match, or one that is there is
544 * skb if we are handling skb, or a timestamp if we are handling 517 * incompatible.
545 * timestamp. We can't match that up, so save what we have.
546 */ 518 */
547 conflict = mlxsw_sp1_ptp_unmatched_save(mlxsw_sp, key, 519 if (length < 100)
548 skb, timestamp); 520 err = mlxsw_sp1_ptp_unmatched_save(mlxsw_sp, key,
549 if (IS_ERR(conflict)) { 521 skb, timestamp);
550 if (skb) 522 else
551 mlxsw_sp1_ptp_packet_finish(mlxsw_sp, skb, 523 err = -E2BIG;
552 key.local_port, 524 if (err && skb)
553 key.ingress, NULL); 525 mlxsw_sp1_ptp_packet_finish(mlxsw_sp, skb,
554 } else { 526 key.local_port,
555 /* Above, we removed an object with this key from the 527 key.ingress, NULL);
556 * hash table, under lock, so conflict can not be a 528 unmatched = NULL;
557 * valid pointer. 529 }
558 */ 530
559 WARN_ON_ONCE(conflict); 531 if (unmatched) {
560 } 532 err = mlxsw_sp1_ptp_unmatched_remove(mlxsw_sp, unmatched);
533 WARN_ON_ONCE(err);
561 } 534 }
562 535
563 spin_unlock(&mlxsw_sp->ptp_state->unmatched_lock); 536 spin_unlock(&mlxsw_sp->ptp_state->unmatched_lock);
@@ -669,9 +642,8 @@ mlxsw_sp1_ptp_ht_gc_collect(struct mlxsw_sp_ptp_state *ptp_state,
669 local_bh_disable(); 642 local_bh_disable();
670 643
671 spin_lock(&ptp_state->unmatched_lock); 644 spin_lock(&ptp_state->unmatched_lock);
672 err = rhashtable_remove_fast(&ptp_state->unmatched_ht, 645 err = rhltable_remove(&ptp_state->unmatched_ht, &unmatched->ht_node,
673 &unmatched->ht_node, 646 mlxsw_sp1_ptp_unmatched_ht_params);
674 mlxsw_sp1_ptp_unmatched_ht_params);
675 spin_unlock(&ptp_state->unmatched_lock); 647 spin_unlock(&ptp_state->unmatched_lock);
676 648
677 if (err) 649 if (err)
@@ -702,7 +674,7 @@ static void mlxsw_sp1_ptp_ht_gc(struct work_struct *work)
702 ptp_state = container_of(dwork, struct mlxsw_sp_ptp_state, ht_gc_dw); 674 ptp_state = container_of(dwork, struct mlxsw_sp_ptp_state, ht_gc_dw);
703 gc_cycle = ptp_state->gc_cycle++; 675 gc_cycle = ptp_state->gc_cycle++;
704 676
705 rhashtable_walk_enter(&ptp_state->unmatched_ht, &iter); 677 rhltable_walk_enter(&ptp_state->unmatched_ht, &iter);
706 rhashtable_walk_start(&iter); 678 rhashtable_walk_start(&iter);
707 while ((obj = rhashtable_walk_next(&iter))) { 679 while ((obj = rhashtable_walk_next(&iter))) {
708 if (IS_ERR(obj)) 680 if (IS_ERR(obj))
@@ -855,8 +827,8 @@ struct mlxsw_sp_ptp_state *mlxsw_sp1_ptp_init(struct mlxsw_sp *mlxsw_sp)
855 827
856 spin_lock_init(&ptp_state->unmatched_lock); 828 spin_lock_init(&ptp_state->unmatched_lock);
857 829
858 err = rhashtable_init(&ptp_state->unmatched_ht, 830 err = rhltable_init(&ptp_state->unmatched_ht,
859 &mlxsw_sp1_ptp_unmatched_ht_params); 831 &mlxsw_sp1_ptp_unmatched_ht_params);
860 if (err) 832 if (err)
861 goto err_hashtable_init; 833 goto err_hashtable_init;
862 834
@@ -891,7 +863,7 @@ err_fifo_clr:
891err_mtptpt1_set: 863err_mtptpt1_set:
892 mlxsw_sp_ptp_mtptpt_set(mlxsw_sp, MLXSW_REG_MTPTPT_TRAP_ID_PTP0, 0); 864 mlxsw_sp_ptp_mtptpt_set(mlxsw_sp, MLXSW_REG_MTPTPT_TRAP_ID_PTP0, 0);
893err_mtptpt_set: 865err_mtptpt_set:
894 rhashtable_destroy(&ptp_state->unmatched_ht); 866 rhltable_destroy(&ptp_state->unmatched_ht);
895err_hashtable_init: 867err_hashtable_init:
896 kfree(ptp_state); 868 kfree(ptp_state);
897 return ERR_PTR(err); 869 return ERR_PTR(err);
@@ -906,8 +878,8 @@ void mlxsw_sp1_ptp_fini(struct mlxsw_sp_ptp_state *ptp_state)
906 mlxsw_sp1_ptp_set_fifo_clr_on_trap(mlxsw_sp, false); 878 mlxsw_sp1_ptp_set_fifo_clr_on_trap(mlxsw_sp, false);
907 mlxsw_sp_ptp_mtptpt_set(mlxsw_sp, MLXSW_REG_MTPTPT_TRAP_ID_PTP1, 0); 879 mlxsw_sp_ptp_mtptpt_set(mlxsw_sp, MLXSW_REG_MTPTPT_TRAP_ID_PTP1, 0);
908 mlxsw_sp_ptp_mtptpt_set(mlxsw_sp, MLXSW_REG_MTPTPT_TRAP_ID_PTP0, 0); 880 mlxsw_sp_ptp_mtptpt_set(mlxsw_sp, MLXSW_REG_MTPTPT_TRAP_ID_PTP0, 0);
909 rhashtable_free_and_destroy(&ptp_state->unmatched_ht, 881 rhltable_free_and_destroy(&ptp_state->unmatched_ht,
910 &mlxsw_sp1_ptp_unmatched_free_fn, NULL); 882 &mlxsw_sp1_ptp_unmatched_free_fn, NULL);
911 kfree(ptp_state); 883 kfree(ptp_state);
912} 884}
913 885
@@ -979,6 +951,9 @@ static int mlxsw_sp1_ptp_mtpppc_update(struct mlxsw_sp_port *mlxsw_sp_port,
979{ 951{
980 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 952 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
981 struct mlxsw_sp_port *tmp; 953 struct mlxsw_sp_port *tmp;
954 u16 orig_ing_types = 0;
955 u16 orig_egr_types = 0;
956 int err;
982 int i; 957 int i;
983 958
984 /* MTPPPC configures timestamping globally, not per port. Find the 959 /* MTPPPC configures timestamping globally, not per port. Find the
@@ -986,12 +961,26 @@ static int mlxsw_sp1_ptp_mtpppc_update(struct mlxsw_sp_port *mlxsw_sp_port,
986 */ 961 */
987 for (i = 1; i < mlxsw_core_max_ports(mlxsw_sp->core); i++) { 962 for (i = 1; i < mlxsw_core_max_ports(mlxsw_sp->core); i++) {
988 tmp = mlxsw_sp->ports[i]; 963 tmp = mlxsw_sp->ports[i];
964 if (tmp) {
965 orig_ing_types |= tmp->ptp.ing_types;
966 orig_egr_types |= tmp->ptp.egr_types;
967 }
989 if (tmp && tmp != mlxsw_sp_port) { 968 if (tmp && tmp != mlxsw_sp_port) {
990 ing_types |= tmp->ptp.ing_types; 969 ing_types |= tmp->ptp.ing_types;
991 egr_types |= tmp->ptp.egr_types; 970 egr_types |= tmp->ptp.egr_types;
992 } 971 }
993 } 972 }
994 973
974 if ((ing_types || egr_types) && !(orig_ing_types || orig_egr_types)) {
975 err = mlxsw_sp_nve_inc_parsing_depth_get(mlxsw_sp);
976 if (err) {
977 netdev_err(mlxsw_sp_port->dev, "Failed to increase parsing depth");
978 return err;
979 }
980 }
981 if (!(ing_types || egr_types) && (orig_ing_types || orig_egr_types))
982 mlxsw_sp_nve_inc_parsing_depth_put(mlxsw_sp);
983
995 return mlxsw_sp1_ptp_mtpppc_set(mlxsw_sp_port->mlxsw_sp, 984 return mlxsw_sp1_ptp_mtpppc_set(mlxsw_sp_port->mlxsw_sp,
996 ing_types, egr_types); 985 ing_types, egr_types);
997} 986}
diff --git a/drivers/net/ethernet/mscc/ocelot.c b/drivers/net/ethernet/mscc/ocelot.c
index b71e4ecbe469..6932e615d4b0 100644
--- a/drivers/net/ethernet/mscc/ocelot.c
+++ b/drivers/net/ethernet/mscc/ocelot.c
@@ -1818,6 +1818,7 @@ EXPORT_SYMBOL(ocelot_init);
1818 1818
1819void ocelot_deinit(struct ocelot *ocelot) 1819void ocelot_deinit(struct ocelot *ocelot)
1820{ 1820{
1821 cancel_delayed_work(&ocelot->stats_work);
1821 destroy_workqueue(ocelot->stats_queue); 1822 destroy_workqueue(ocelot->stats_queue);
1822 mutex_destroy(&ocelot->stats_lock); 1823 mutex_destroy(&ocelot->stats_lock);
1823 ocelot_ace_deinit(); 1824 ocelot_ace_deinit();
diff --git a/drivers/net/ethernet/mscc/ocelot_ace.c b/drivers/net/ethernet/mscc/ocelot_ace.c
index 39aca1ab4687..86fc6e6b46dd 100644
--- a/drivers/net/ethernet/mscc/ocelot_ace.c
+++ b/drivers/net/ethernet/mscc/ocelot_ace.c
@@ -317,7 +317,7 @@ static void is2_action_set(struct vcap_data *data,
317 break; 317 break;
318 case OCELOT_ACL_ACTION_TRAP: 318 case OCELOT_ACL_ACTION_TRAP:
319 VCAP_ACT_SET(PORT_MASK, 0x0); 319 VCAP_ACT_SET(PORT_MASK, 0x0);
320 VCAP_ACT_SET(MASK_MODE, 0x0); 320 VCAP_ACT_SET(MASK_MODE, 0x1);
321 VCAP_ACT_SET(POLICE_ENA, 0x0); 321 VCAP_ACT_SET(POLICE_ENA, 0x0);
322 VCAP_ACT_SET(POLICE_IDX, 0x0); 322 VCAP_ACT_SET(POLICE_IDX, 0x0);
323 VCAP_ACT_SET(CPU_QU_NUM, 0x0); 323 VCAP_ACT_SET(CPU_QU_NUM, 0x0);
diff --git a/drivers/net/ethernet/mscc/ocelot_flower.c b/drivers/net/ethernet/mscc/ocelot_flower.c
index 7aaddc09c185..b894bc0c9c16 100644
--- a/drivers/net/ethernet/mscc/ocelot_flower.c
+++ b/drivers/net/ethernet/mscc/ocelot_flower.c
@@ -13,12 +13,6 @@ struct ocelot_port_block {
13 struct ocelot_port *port; 13 struct ocelot_port *port;
14}; 14};
15 15
16static u16 get_prio(u32 prio)
17{
18 /* prio starts from 0x1000 while the ids starts from 0 */
19 return prio >> 16;
20}
21
22static int ocelot_flower_parse_action(struct flow_cls_offload *f, 16static int ocelot_flower_parse_action(struct flow_cls_offload *f,
23 struct ocelot_ace_rule *rule) 17 struct ocelot_ace_rule *rule)
24{ 18{
@@ -168,7 +162,7 @@ static int ocelot_flower_parse(struct flow_cls_offload *f,
168 } 162 }
169 163
170finished_key_parsing: 164finished_key_parsing:
171 ocelot_rule->prio = get_prio(f->common.prio); 165 ocelot_rule->prio = f->common.prio;
172 ocelot_rule->id = f->cookie; 166 ocelot_rule->id = f->cookie;
173 return ocelot_flower_parse_action(f, ocelot_rule); 167 return ocelot_flower_parse_action(f, ocelot_rule);
174} 168}
@@ -218,7 +212,7 @@ static int ocelot_flower_destroy(struct flow_cls_offload *f,
218 struct ocelot_ace_rule rule; 212 struct ocelot_ace_rule rule;
219 int ret; 213 int ret;
220 214
221 rule.prio = get_prio(f->common.prio); 215 rule.prio = f->common.prio;
222 rule.port = port_block->port; 216 rule.port = port_block->port;
223 rule.id = f->cookie; 217 rule.id = f->cookie;
224 218
@@ -236,7 +230,7 @@ static int ocelot_flower_stats_update(struct flow_cls_offload *f,
236 struct ocelot_ace_rule rule; 230 struct ocelot_ace_rule rule;
237 int ret; 231 int ret;
238 232
239 rule.prio = get_prio(f->common.prio); 233 rule.prio = f->common.prio;
240 rule.port = port_block->port; 234 rule.port = port_block->port;
241 rule.id = f->cookie; 235 rule.id = f->cookie;
242 ret = ocelot_ace_rule_stats_update(&rule); 236 ret = ocelot_ace_rule_stats_update(&rule);
@@ -316,15 +310,14 @@ int ocelot_setup_tc_block_flower_bind(struct ocelot_port *port,
316 if (f->binder_type == FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS) 310 if (f->binder_type == FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS)
317 return -EOPNOTSUPP; 311 return -EOPNOTSUPP;
318 312
319 block_cb = flow_block_cb_lookup(f, ocelot_setup_tc_block_cb_flower, 313 block_cb = flow_block_cb_lookup(f->block,
320 port); 314 ocelot_setup_tc_block_cb_flower, port);
321 if (!block_cb) { 315 if (!block_cb) {
322 port_block = ocelot_port_block_create(port); 316 port_block = ocelot_port_block_create(port);
323 if (!port_block) 317 if (!port_block)
324 return -ENOMEM; 318 return -ENOMEM;
325 319
326 block_cb = flow_block_cb_alloc(f->net, 320 block_cb = flow_block_cb_alloc(ocelot_setup_tc_block_cb_flower,
327 ocelot_setup_tc_block_cb_flower,
328 port, port_block, 321 port, port_block,
329 ocelot_tc_block_unbind); 322 ocelot_tc_block_unbind);
330 if (IS_ERR(block_cb)) { 323 if (IS_ERR(block_cb)) {
@@ -351,8 +344,8 @@ void ocelot_setup_tc_block_flower_unbind(struct ocelot_port *port,
351{ 344{
352 struct flow_block_cb *block_cb; 345 struct flow_block_cb *block_cb;
353 346
354 block_cb = flow_block_cb_lookup(f, ocelot_setup_tc_block_cb_flower, 347 block_cb = flow_block_cb_lookup(f->block,
355 port); 348 ocelot_setup_tc_block_cb_flower, port);
356 if (!block_cb) 349 if (!block_cb)
357 return; 350 return;
358 351
diff --git a/drivers/net/ethernet/mscc/ocelot_tc.c b/drivers/net/ethernet/mscc/ocelot_tc.c
index 9e6464ffae5d..16a6db71ca5e 100644
--- a/drivers/net/ethernet/mscc/ocelot_tc.c
+++ b/drivers/net/ethernet/mscc/ocelot_tc.c
@@ -134,7 +134,7 @@ static int ocelot_setup_tc_block(struct ocelot_port *port,
134 struct flow_block_offload *f) 134 struct flow_block_offload *f)
135{ 135{
136 struct flow_block_cb *block_cb; 136 struct flow_block_cb *block_cb;
137 tc_setup_cb_t *cb; 137 flow_setup_cb_t *cb;
138 int err; 138 int err;
139 139
140 netdev_dbg(port->dev, "tc_block command %d, binder_type %d\n", 140 netdev_dbg(port->dev, "tc_block command %d, binder_type %d\n",
@@ -156,7 +156,7 @@ static int ocelot_setup_tc_block(struct ocelot_port *port,
156 if (flow_block_cb_is_busy(cb, port, &ocelot_block_cb_list)) 156 if (flow_block_cb_is_busy(cb, port, &ocelot_block_cb_list))
157 return -EBUSY; 157 return -EBUSY;
158 158
159 block_cb = flow_block_cb_alloc(f->net, cb, port, port, NULL); 159 block_cb = flow_block_cb_alloc(cb, port, port, NULL);
160 if (IS_ERR(block_cb)) 160 if (IS_ERR(block_cb))
161 return PTR_ERR(block_cb); 161 return PTR_ERR(block_cb);
162 162
@@ -169,7 +169,7 @@ static int ocelot_setup_tc_block(struct ocelot_port *port,
169 list_add_tail(&block_cb->driver_list, f->driver_block_list); 169 list_add_tail(&block_cb->driver_list, f->driver_block_list);
170 return 0; 170 return 0;
171 case FLOW_BLOCK_UNBIND: 171 case FLOW_BLOCK_UNBIND:
172 block_cb = flow_block_cb_lookup(f, cb, port); 172 block_cb = flow_block_cb_lookup(f->block, cb, port);
173 if (!block_cb) 173 if (!block_cb)
174 return -ENOENT; 174 return -ENOENT;
175 175
diff --git a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
index d8b7fba96d58..337b0cbfd153 100644
--- a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
+++ b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
@@ -3919,7 +3919,7 @@ static int myri10ge_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
3919 * setup (if available). */ 3919 * setup (if available). */
3920 status = myri10ge_request_irq(mgp); 3920 status = myri10ge_request_irq(mgp);
3921 if (status != 0) 3921 if (status != 0)
3922 goto abort_with_firmware; 3922 goto abort_with_slices;
3923 myri10ge_free_irq(mgp); 3923 myri10ge_free_irq(mgp);
3924 3924
3925 /* Save configuration space to be restored if the 3925 /* Save configuration space to be restored if the
diff --git a/drivers/net/ethernet/netronome/nfp/bpf/jit.c b/drivers/net/ethernet/netronome/nfp/bpf/jit.c
index 4054b70d7719..5afcb3c4c2ef 100644
--- a/drivers/net/ethernet/netronome/nfp/bpf/jit.c
+++ b/drivers/net/ethernet/netronome/nfp/bpf/jit.c
@@ -1163,7 +1163,7 @@ mem_op_stack(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
1163 bool clr_gpr, lmem_step step) 1163 bool clr_gpr, lmem_step step)
1164{ 1164{
1165 s32 off = nfp_prog->stack_frame_depth + meta->insn.off + ptr_off; 1165 s32 off = nfp_prog->stack_frame_depth + meta->insn.off + ptr_off;
1166 bool first = true, last; 1166 bool first = true, narrow_ld, last;
1167 bool needs_inc = false; 1167 bool needs_inc = false;
1168 swreg stack_off_reg; 1168 swreg stack_off_reg;
1169 u8 prev_gpr = 255; 1169 u8 prev_gpr = 255;
@@ -1209,13 +1209,22 @@ mem_op_stack(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
1209 1209
1210 needs_inc = true; 1210 needs_inc = true;
1211 } 1211 }
1212
1213 narrow_ld = clr_gpr && size < 8;
1214
1212 if (lm3) { 1215 if (lm3) {
1216 unsigned int nop_cnt;
1217
1213 emit_csr_wr(nfp_prog, imm_b(nfp_prog), NFP_CSR_ACT_LM_ADDR3); 1218 emit_csr_wr(nfp_prog, imm_b(nfp_prog), NFP_CSR_ACT_LM_ADDR3);
1214 /* For size < 4 one slot will be filled by zeroing of upper. */ 1219 /* For size < 4 one slot will be filled by zeroing of upper,
1215 wrp_nops(nfp_prog, clr_gpr && size < 8 ? 2 : 3); 1220 * but be careful, that zeroing could be eliminated by zext
1221 * optimization.
1222 */
1223 nop_cnt = narrow_ld && meta->flags & FLAG_INSN_DO_ZEXT ? 2 : 3;
1224 wrp_nops(nfp_prog, nop_cnt);
1216 } 1225 }
1217 1226
1218 if (clr_gpr && size < 8) 1227 if (narrow_ld)
1219 wrp_zext(nfp_prog, meta, gpr); 1228 wrp_zext(nfp_prog, meta, gpr);
1220 1229
1221 while (size) { 1230 while (size) {
diff --git a/drivers/net/ethernet/netronome/nfp/flower/offload.c b/drivers/net/ethernet/netronome/nfp/flower/offload.c
index faa8ba012a37..457bdc60f3ee 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/offload.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/offload.c
@@ -1318,8 +1318,7 @@ static int nfp_flower_setup_tc_block(struct net_device *netdev,
1318 &nfp_block_cb_list)) 1318 &nfp_block_cb_list))
1319 return -EBUSY; 1319 return -EBUSY;
1320 1320
1321 block_cb = flow_block_cb_alloc(f->net, 1321 block_cb = flow_block_cb_alloc(nfp_flower_setup_tc_block_cb,
1322 nfp_flower_setup_tc_block_cb,
1323 repr, repr, NULL); 1322 repr, repr, NULL);
1324 if (IS_ERR(block_cb)) 1323 if (IS_ERR(block_cb))
1325 return PTR_ERR(block_cb); 1324 return PTR_ERR(block_cb);
@@ -1328,7 +1327,8 @@ static int nfp_flower_setup_tc_block(struct net_device *netdev,
1328 list_add_tail(&block_cb->driver_list, &nfp_block_cb_list); 1327 list_add_tail(&block_cb->driver_list, &nfp_block_cb_list);
1329 return 0; 1328 return 0;
1330 case FLOW_BLOCK_UNBIND: 1329 case FLOW_BLOCK_UNBIND:
1331 block_cb = flow_block_cb_lookup(f, nfp_flower_setup_tc_block_cb, 1330 block_cb = flow_block_cb_lookup(f->block,
1331 nfp_flower_setup_tc_block_cb,
1332 repr); 1332 repr);
1333 if (!block_cb) 1333 if (!block_cb)
1334 return -ENOENT; 1334 return -ENOENT;
@@ -1409,13 +1409,21 @@ nfp_flower_setup_indr_tc_block(struct net_device *netdev, struct nfp_app *app,
1409 struct nfp_flower_priv *priv = app->priv; 1409 struct nfp_flower_priv *priv = app->priv;
1410 struct flow_block_cb *block_cb; 1410 struct flow_block_cb *block_cb;
1411 1411
1412 if (f->binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS && 1412 if ((f->binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS &&
1413 !(f->binder_type == FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS && 1413 !nfp_flower_internal_port_can_offload(app, netdev)) ||
1414 nfp_flower_internal_port_can_offload(app, netdev))) 1414 (f->binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS &&
1415 nfp_flower_internal_port_can_offload(app, netdev)))
1415 return -EOPNOTSUPP; 1416 return -EOPNOTSUPP;
1416 1417
1417 switch (f->command) { 1418 switch (f->command) {
1418 case FLOW_BLOCK_BIND: 1419 case FLOW_BLOCK_BIND:
1420 cb_priv = nfp_flower_indr_block_cb_priv_lookup(app, netdev);
1421 if (cb_priv &&
1422 flow_block_cb_is_busy(nfp_flower_setup_indr_block_cb,
1423 cb_priv,
1424 &nfp_block_cb_list))
1425 return -EBUSY;
1426
1419 cb_priv = kmalloc(sizeof(*cb_priv), GFP_KERNEL); 1427 cb_priv = kmalloc(sizeof(*cb_priv), GFP_KERNEL);
1420 if (!cb_priv) 1428 if (!cb_priv)
1421 return -ENOMEM; 1429 return -ENOMEM;
@@ -1424,8 +1432,7 @@ nfp_flower_setup_indr_tc_block(struct net_device *netdev, struct nfp_app *app,
1424 cb_priv->app = app; 1432 cb_priv->app = app;
1425 list_add(&cb_priv->list, &priv->indr_block_cb_priv); 1433 list_add(&cb_priv->list, &priv->indr_block_cb_priv);
1426 1434
1427 block_cb = flow_block_cb_alloc(f->net, 1435 block_cb = flow_block_cb_alloc(nfp_flower_setup_indr_block_cb,
1428 nfp_flower_setup_indr_block_cb,
1429 cb_priv, cb_priv, 1436 cb_priv, cb_priv,
1430 nfp_flower_setup_indr_tc_release); 1437 nfp_flower_setup_indr_tc_release);
1431 if (IS_ERR(block_cb)) { 1438 if (IS_ERR(block_cb)) {
@@ -1442,7 +1449,7 @@ nfp_flower_setup_indr_tc_block(struct net_device *netdev, struct nfp_app *app,
1442 if (!cb_priv) 1449 if (!cb_priv)
1443 return -ENOENT; 1450 return -ENOENT;
1444 1451
1445 block_cb = flow_block_cb_lookup(f, 1452 block_cb = flow_block_cb_lookup(f->block,
1446 nfp_flower_setup_indr_block_cb, 1453 nfp_flower_setup_indr_block_cb,
1447 cb_priv); 1454 cb_priv);
1448 if (!block_cb) 1455 if (!block_cb)
diff --git a/drivers/net/ethernet/netronome/nfp/flower/qos_conf.c b/drivers/net/ethernet/netronome/nfp/flower/qos_conf.c
index 86e968cd5ffd..124a43dc136a 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/qos_conf.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/qos_conf.c
@@ -93,7 +93,7 @@ nfp_flower_install_rate_limiter(struct nfp_app *app, struct net_device *netdev,
93 return -EOPNOTSUPP; 93 return -EOPNOTSUPP;
94 } 94 }
95 95
96 if (flow->common.prio != (1 << 16)) { 96 if (flow->common.prio != 1) {
97 NL_SET_ERR_MSG_MOD(extack, "unsupported offload: qos rate limit offload requires highest priority"); 97 NL_SET_ERR_MSG_MOD(extack, "unsupported offload: qos rate limit offload requires highest priority");
98 return -EOPNOTSUPP; 98 return -EOPNOTSUPP;
99 } 99 }
diff --git a/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c b/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c
index a7a80f4b722a..f0ee982eb1b5 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c
@@ -328,13 +328,13 @@ nfp_tun_neigh_event_handler(struct notifier_block *nb, unsigned long event,
328 328
329 flow.daddr = *(__be32 *)n->primary_key; 329 flow.daddr = *(__be32 *)n->primary_key;
330 330
331 /* Only concerned with route changes for representors. */
332 if (!nfp_netdev_is_nfp_repr(n->dev))
333 return NOTIFY_DONE;
334
335 app_priv = container_of(nb, struct nfp_flower_priv, tun.neigh_nb); 331 app_priv = container_of(nb, struct nfp_flower_priv, tun.neigh_nb);
336 app = app_priv->app; 332 app = app_priv->app;
337 333
334 if (!nfp_netdev_is_nfp_repr(n->dev) &&
335 !nfp_flower_internal_port_can_offload(app, n->dev))
336 return NOTIFY_DONE;
337
338 /* Only concerned with changes to routes already added to NFP. */ 338 /* Only concerned with changes to routes already added to NFP. */
339 if (!nfp_tun_has_route(app, flow.daddr)) 339 if (!nfp_tun_has_route(app, flow.daddr))
340 return NOTIFY_DONE; 340 return NOTIFY_DONE;
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
index d9cbe84ac6ad..1b840ee47339 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
@@ -444,12 +444,12 @@ static u8 *nfp_vnic_get_sw_stats_strings(struct net_device *netdev, u8 *data)
444 data = nfp_pr_et(data, "hw_rx_csum_complete"); 444 data = nfp_pr_et(data, "hw_rx_csum_complete");
445 data = nfp_pr_et(data, "hw_rx_csum_err"); 445 data = nfp_pr_et(data, "hw_rx_csum_err");
446 data = nfp_pr_et(data, "rx_replace_buf_alloc_fail"); 446 data = nfp_pr_et(data, "rx_replace_buf_alloc_fail");
447 data = nfp_pr_et(data, "rx_tls_decrypted"); 447 data = nfp_pr_et(data, "rx_tls_decrypted_packets");
448 data = nfp_pr_et(data, "hw_tx_csum"); 448 data = nfp_pr_et(data, "hw_tx_csum");
449 data = nfp_pr_et(data, "hw_tx_inner_csum"); 449 data = nfp_pr_et(data, "hw_tx_inner_csum");
450 data = nfp_pr_et(data, "tx_gather"); 450 data = nfp_pr_et(data, "tx_gather");
451 data = nfp_pr_et(data, "tx_lso"); 451 data = nfp_pr_et(data, "tx_lso");
452 data = nfp_pr_et(data, "tx_tls_encrypted"); 452 data = nfp_pr_et(data, "tx_tls_encrypted_packets");
453 data = nfp_pr_et(data, "tx_tls_ooo"); 453 data = nfp_pr_et(data, "tx_tls_ooo");
454 data = nfp_pr_et(data, "tx_tls_drop_no_sync_data"); 454 data = nfp_pr_et(data, "tx_tls_drop_no_sync_data");
455 455
diff --git a/drivers/net/ethernet/ni/Kconfig b/drivers/net/ethernet/ni/Kconfig
index 70b1a03c0953..01229190132d 100644
--- a/drivers/net/ethernet/ni/Kconfig
+++ b/drivers/net/ethernet/ni/Kconfig
@@ -11,7 +11,7 @@ config NET_VENDOR_NI
11 11
12 Note that the answer to this question doesn't directly affect the 12 Note that the answer to this question doesn't directly affect the
13 kernel: saying N will just cause the configurator to skip all 13 kernel: saying N will just cause the configurator to skip all
14 the questions about National Instrument devices. 14 the questions about National Instruments devices.
15 If you say Y, you will be asked for your specific device in the 15 If you say Y, you will be asked for your specific device in the
16 following questions. 16 following questions.
17 17
diff --git a/drivers/net/ethernet/packetengines/Kconfig b/drivers/net/ethernet/packetengines/Kconfig
index 8161e308e64b..ead3750b4489 100644
--- a/drivers/net/ethernet/packetengines/Kconfig
+++ b/drivers/net/ethernet/packetengines/Kconfig
@@ -1,10 +1,10 @@
1# SPDX-License-Identifier: GPL-2.0-only 1# SPDX-License-Identifier: GPL-2.0-only
2# 2#
3# Packet engine device configuration 3# Packet Engines device configuration
4# 4#
5 5
6config NET_VENDOR_PACKET_ENGINES 6config NET_VENDOR_PACKET_ENGINES
7 bool "Packet Engine devices" 7 bool "Packet Engines devices"
8 default y 8 default y
9 depends on PCI 9 depends on PCI
10 ---help--- 10 ---help---
@@ -12,7 +12,7 @@ config NET_VENDOR_PACKET_ENGINES
12 12
13 Note that the answer to this question doesn't directly affect the 13 Note that the answer to this question doesn't directly affect the
14 kernel: saying N will just cause the configurator to skip all 14 kernel: saying N will just cause the configurator to skip all
15 the questions about packet engine devices. If you say Y, you will 15 the questions about Packet Engines devices. If you say Y, you will
16 be asked for your specific card in the following questions. 16 be asked for your specific card in the following questions.
17 17
18if NET_VENDOR_PACKET_ENGINES 18if NET_VENDOR_PACKET_ENGINES
diff --git a/drivers/net/ethernet/packetengines/Makefile b/drivers/net/ethernet/packetengines/Makefile
index 1553c9cfc254..cf054b796d11 100644
--- a/drivers/net/ethernet/packetengines/Makefile
+++ b/drivers/net/ethernet/packetengines/Makefile
@@ -1,6 +1,6 @@
1# SPDX-License-Identifier: GPL-2.0-only 1# SPDX-License-Identifier: GPL-2.0-only
2# 2#
3# Makefile for the Packet Engine network device drivers. 3# Makefile for the Packet Engines network device drivers.
4# 4#
5 5
6obj-$(CONFIG_HAMACHI) += hamachi.o 6obj-$(CONFIG_HAMACHI) += hamachi.o
diff --git a/drivers/net/ethernet/qlogic/qed/qed_int.c b/drivers/net/ethernet/qlogic/qed/qed_int.c
index 4e8118a08654..9f5113639eaf 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_int.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_int.c
@@ -1093,7 +1093,7 @@ static int qed_int_deassertion(struct qed_hwfn *p_hwfn,
1093 snprintf(bit_name, 30, 1093 snprintf(bit_name, 30,
1094 p_aeu->bit_name, num); 1094 p_aeu->bit_name, num);
1095 else 1095 else
1096 strncpy(bit_name, 1096 strlcpy(bit_name,
1097 p_aeu->bit_name, 30); 1097 p_aeu->bit_name, 30);
1098 1098
1099 /* We now need to pass bitmask in its 1099 /* We now need to pass bitmask in its
diff --git a/drivers/net/ethernet/qlogic/qed/qed_main.c b/drivers/net/ethernet/qlogic/qed/qed_main.c
index 829dd60ab937..1efff7f68ef6 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_main.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_main.c
@@ -1325,7 +1325,7 @@ static int qed_slowpath_start(struct qed_dev *cdev,
1325 &drv_version); 1325 &drv_version);
1326 if (rc) { 1326 if (rc) {
1327 DP_NOTICE(cdev, "Failed sending drv version command\n"); 1327 DP_NOTICE(cdev, "Failed sending drv version command\n");
1328 return rc; 1328 goto err4;
1329 } 1329 }
1330 } 1330 }
1331 1331
@@ -1333,6 +1333,8 @@ static int qed_slowpath_start(struct qed_dev *cdev,
1333 1333
1334 return 0; 1334 return 0;
1335 1335
1336err4:
1337 qed_ll2_dealloc_if(cdev);
1336err3: 1338err3:
1337 qed_hw_stop(cdev); 1339 qed_hw_stop(cdev);
1338err2: 1340err2:
diff --git a/drivers/net/ethernet/qlogic/qed/qed_rdma.c b/drivers/net/ethernet/qlogic/qed/qed_rdma.c
index f900fde448db..158ac0738911 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_rdma.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_rdma.c
@@ -442,7 +442,7 @@ static void qed_rdma_init_devinfo(struct qed_hwfn *p_hwfn,
442 /* Vendor specific information */ 442 /* Vendor specific information */
443 dev->vendor_id = cdev->vendor_id; 443 dev->vendor_id = cdev->vendor_id;
444 dev->vendor_part_id = cdev->device_id; 444 dev->vendor_part_id = cdev->device_id;
445 dev->hw_ver = 0; 445 dev->hw_ver = cdev->chip_rev;
446 dev->fw_ver = (FW_MAJOR_VERSION << 24) | (FW_MINOR_VERSION << 16) | 446 dev->fw_ver = (FW_MAJOR_VERSION << 24) | (FW_MINOR_VERSION << 16) |
447 (FW_REVISION_VERSION << 8) | (FW_ENGINEERING_VERSION); 447 (FW_REVISION_VERSION << 8) | (FW_ENGINEERING_VERSION);
448 448
@@ -530,9 +530,8 @@ static void qed_rdma_init_devinfo(struct qed_hwfn *p_hwfn,
530 SET_FIELD(dev->dev_caps, QED_RDMA_DEV_CAP_LOCAL_INV_FENCE, 1); 530 SET_FIELD(dev->dev_caps, QED_RDMA_DEV_CAP_LOCAL_INV_FENCE, 1);
531 531
532 /* Check atomic operations support in PCI configuration space. */ 532 /* Check atomic operations support in PCI configuration space. */
533 pci_read_config_dword(cdev->pdev, 533 pcie_capability_read_dword(cdev->pdev, PCI_EXP_DEVCTL2,
534 cdev->pdev->pcie_cap + PCI_EXP_DEVCTL2, 534 &pci_status_control);
535 &pci_status_control);
536 535
537 if (pci_status_control & PCI_EXP_DEVCTL2_LTR_EN) 536 if (pci_status_control & PCI_EXP_DEVCTL2_LTR_EN)
538 SET_FIELD(dev->dev_caps, QED_RDMA_DEV_CAP_ATOMIC_OP, 1); 537 SET_FIELD(dev->dev_caps, QED_RDMA_DEV_CAP_ATOMIC_OP, 1);
diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_data.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_data.c
index 60189923737a..21d38167f961 100644
--- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_data.c
+++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_data.c
@@ -206,9 +206,9 @@ rmnet_map_ipv4_ul_csum_header(void *iphdr,
206 ul_header->csum_insert_offset = skb->csum_offset; 206 ul_header->csum_insert_offset = skb->csum_offset;
207 ul_header->csum_enabled = 1; 207 ul_header->csum_enabled = 1;
208 if (ip4h->protocol == IPPROTO_UDP) 208 if (ip4h->protocol == IPPROTO_UDP)
209 ul_header->udp_ip4_ind = 1; 209 ul_header->udp_ind = 1;
210 else 210 else
211 ul_header->udp_ip4_ind = 0; 211 ul_header->udp_ind = 0;
212 212
213 /* Changing remaining fields to network order */ 213 /* Changing remaining fields to network order */
214 hdr++; 214 hdr++;
@@ -239,6 +239,7 @@ rmnet_map_ipv6_ul_csum_header(void *ip6hdr,
239 struct rmnet_map_ul_csum_header *ul_header, 239 struct rmnet_map_ul_csum_header *ul_header,
240 struct sk_buff *skb) 240 struct sk_buff *skb)
241{ 241{
242 struct ipv6hdr *ip6h = (struct ipv6hdr *)ip6hdr;
242 __be16 *hdr = (__be16 *)ul_header, offset; 243 __be16 *hdr = (__be16 *)ul_header, offset;
243 244
244 offset = htons((__force u16)(skb_transport_header(skb) - 245 offset = htons((__force u16)(skb_transport_header(skb) -
@@ -246,7 +247,11 @@ rmnet_map_ipv6_ul_csum_header(void *ip6hdr,
246 ul_header->csum_start_offset = offset; 247 ul_header->csum_start_offset = offset;
247 ul_header->csum_insert_offset = skb->csum_offset; 248 ul_header->csum_insert_offset = skb->csum_offset;
248 ul_header->csum_enabled = 1; 249 ul_header->csum_enabled = 1;
249 ul_header->udp_ip4_ind = 0; 250
251 if (ip6h->nexthdr == IPPROTO_UDP)
252 ul_header->udp_ind = 1;
253 else
254 ul_header->udp_ind = 0;
250 255
251 /* Changing remaining fields to network order */ 256 /* Changing remaining fields to network order */
252 hdr++; 257 hdr++;
@@ -419,7 +424,7 @@ sw_csum:
419 ul_header->csum_start_offset = 0; 424 ul_header->csum_start_offset = 0;
420 ul_header->csum_insert_offset = 0; 425 ul_header->csum_insert_offset = 0;
421 ul_header->csum_enabled = 0; 426 ul_header->csum_enabled = 0;
422 ul_header->udp_ip4_ind = 0; 427 ul_header->udp_ind = 0;
423 428
424 priv->stats.csum_sw++; 429 priv->stats.csum_sw++;
425} 430}
diff --git a/drivers/net/ethernet/realtek/r8169_main.c b/drivers/net/ethernet/realtek/r8169_main.c
index 0637c6752a78..bae0074ab9aa 100644
--- a/drivers/net/ethernet/realtek/r8169_main.c
+++ b/drivers/net/ethernet/realtek/r8169_main.c
@@ -3251,9 +3251,9 @@ static void rtl8168g_1_hw_phy_config(struct rtl8169_private *tp)
3251 3251
3252 ret = phy_read_paged(tp->phydev, 0x0a46, 0x13); 3252 ret = phy_read_paged(tp->phydev, 0x0a46, 0x13);
3253 if (ret & BIT(8)) 3253 if (ret & BIT(8))
3254 phy_modify_paged(tp->phydev, 0x0c41, 0x12, 0, BIT(1)); 3254 phy_modify_paged(tp->phydev, 0x0c41, 0x15, 0, BIT(1));
3255 else 3255 else
3256 phy_modify_paged(tp->phydev, 0x0c41, 0x12, BIT(1), 0); 3256 phy_modify_paged(tp->phydev, 0x0c41, 0x15, BIT(1), 0);
3257 3257
3258 /* Enable PHY auto speed down */ 3258 /* Enable PHY auto speed down */
3259 phy_modify_paged(tp->phydev, 0x0a44, 0x11, 0, BIT(3) | BIT(2)); 3259 phy_modify_paged(tp->phydev, 0x0a44, 0x11, 0, BIT(3) | BIT(2));
@@ -5921,6 +5921,7 @@ static struct sk_buff *rtl8169_try_rx_copy(void *data,
5921 skb = napi_alloc_skb(&tp->napi, pkt_size); 5921 skb = napi_alloc_skb(&tp->napi, pkt_size);
5922 if (skb) 5922 if (skb)
5923 skb_copy_to_linear_data(skb, data, pkt_size); 5923 skb_copy_to_linear_data(skb, data, pkt_size);
5924 dma_sync_single_for_device(d, addr, pkt_size, DMA_FROM_DEVICE);
5924 5925
5925 return skb; 5926 return skb;
5926} 5927}
@@ -6136,10 +6137,7 @@ static int r8169_phy_connect(struct rtl8169_private *tp)
6136 if (ret) 6137 if (ret)
6137 return ret; 6138 return ret;
6138 6139
6139 if (tp->supports_gmii) 6140 if (!tp->supports_gmii)
6140 phy_remove_link_mode(phydev,
6141 ETHTOOL_LINK_MODE_1000baseT_Half_BIT);
6142 else
6143 phy_set_max_speed(phydev, SPEED_100); 6141 phy_set_max_speed(phydev, SPEED_100);
6144 6142
6145 phy_support_asym_pause(phydev); 6143 phy_support_asym_pause(phydev);
@@ -6589,13 +6587,18 @@ static int rtl_alloc_irq(struct rtl8169_private *tp)
6589{ 6587{
6590 unsigned int flags; 6588 unsigned int flags;
6591 6589
6592 if (tp->mac_version <= RTL_GIGA_MAC_VER_06) { 6590 switch (tp->mac_version) {
6591 case RTL_GIGA_MAC_VER_02 ... RTL_GIGA_MAC_VER_06:
6593 rtl_unlock_config_regs(tp); 6592 rtl_unlock_config_regs(tp);
6594 RTL_W8(tp, Config2, RTL_R8(tp, Config2) & ~MSIEnable); 6593 RTL_W8(tp, Config2, RTL_R8(tp, Config2) & ~MSIEnable);
6595 rtl_lock_config_regs(tp); 6594 rtl_lock_config_regs(tp);
6595 /* fall through */
6596 case RTL_GIGA_MAC_VER_07 ... RTL_GIGA_MAC_VER_24:
6596 flags = PCI_IRQ_LEGACY; 6597 flags = PCI_IRQ_LEGACY;
6597 } else { 6598 break;
6599 default:
6598 flags = PCI_IRQ_ALL_TYPES; 6600 flags = PCI_IRQ_ALL_TYPES;
6601 break;
6599 } 6602 }
6600 6603
6601 return pci_alloc_irq_vectors(tp->pci_dev, 1, 1, flags); 6604 return pci_alloc_irq_vectors(tp->pci_dev, 1, 1, flags);
diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c
index ef8f08931fe8..6cacd5e893ac 100644
--- a/drivers/net/ethernet/renesas/ravb_main.c
+++ b/drivers/net/ethernet/renesas/ravb_main.c
@@ -1,7 +1,7 @@
1// SPDX-License-Identifier: GPL-2.0 1// SPDX-License-Identifier: GPL-2.0
2/* Renesas Ethernet AVB device driver 2/* Renesas Ethernet AVB device driver
3 * 3 *
4 * Copyright (C) 2014-2015 Renesas Electronics Corporation 4 * Copyright (C) 2014-2019 Renesas Electronics Corporation
5 * Copyright (C) 2015 Renesas Solutions Corp. 5 * Copyright (C) 2015 Renesas Solutions Corp.
6 * Copyright (C) 2015-2016 Cogent Embedded, Inc. <source@cogentembedded.com> 6 * Copyright (C) 2015-2016 Cogent Embedded, Inc. <source@cogentembedded.com>
7 * 7 *
@@ -513,7 +513,10 @@ static void ravb_get_tx_tstamp(struct net_device *ndev)
513 kfree(ts_skb); 513 kfree(ts_skb);
514 if (tag == tfa_tag) { 514 if (tag == tfa_tag) {
515 skb_tstamp_tx(skb, &shhwtstamps); 515 skb_tstamp_tx(skb, &shhwtstamps);
516 dev_consume_skb_any(skb);
516 break; 517 break;
518 } else {
519 dev_kfree_skb_any(skb);
517 } 520 }
518 } 521 }
519 ravb_modify(ndev, TCCR, TCCR_TFR, TCCR_TFR); 522 ravb_modify(ndev, TCCR, TCCR_TFR, TCCR_TFR);
@@ -1564,7 +1567,7 @@ static netdev_tx_t ravb_start_xmit(struct sk_buff *skb, struct net_device *ndev)
1564 } 1567 }
1565 goto unmap; 1568 goto unmap;
1566 } 1569 }
1567 ts_skb->skb = skb; 1570 ts_skb->skb = skb_get(skb);
1568 ts_skb->tag = priv->ts_skb_tag++; 1571 ts_skb->tag = priv->ts_skb_tag++;
1569 priv->ts_skb_tag &= 0x3ff; 1572 priv->ts_skb_tag &= 0x3ff;
1570 list_add_tail(&ts_skb->list, &priv->ts_skb_list); 1573 list_add_tail(&ts_skb->list, &priv->ts_skb_list);
@@ -1693,6 +1696,7 @@ static int ravb_close(struct net_device *ndev)
1693 /* Clear the timestamp list */ 1696 /* Clear the timestamp list */
1694 list_for_each_entry_safe(ts_skb, ts_skb2, &priv->ts_skb_list, list) { 1697 list_for_each_entry_safe(ts_skb, ts_skb2, &priv->ts_skb_list, list) {
1695 list_del(&ts_skb->list); 1698 list_del(&ts_skb->list);
1699 kfree_skb(ts_skb->skb);
1696 kfree(ts_skb); 1700 kfree(ts_skb);
1697 } 1701 }
1698 1702
diff --git a/drivers/net/ethernet/rocker/rocker_main.c b/drivers/net/ethernet/rocker/rocker_main.c
index 079f459c73a5..2c5d3f5b84dd 100644
--- a/drivers/net/ethernet/rocker/rocker_main.c
+++ b/drivers/net/ethernet/rocker/rocker_main.c
@@ -2208,10 +2208,12 @@ static int rocker_router_fib_event(struct notifier_block *nb,
2208 2208
2209 if (fen_info->fi->fib_nh_is_v6) { 2209 if (fen_info->fi->fib_nh_is_v6) {
2210 NL_SET_ERR_MSG_MOD(info->extack, "IPv6 gateway with IPv4 route is not supported"); 2210 NL_SET_ERR_MSG_MOD(info->extack, "IPv6 gateway with IPv4 route is not supported");
2211 kfree(fib_work);
2211 return notifier_from_errno(-EINVAL); 2212 return notifier_from_errno(-EINVAL);
2212 } 2213 }
2213 if (fen_info->fi->nh) { 2214 if (fen_info->fi->nh) {
2214 NL_SET_ERR_MSG_MOD(info->extack, "IPv4 route with nexthop objects is not supported"); 2215 NL_SET_ERR_MSG_MOD(info->extack, "IPv4 route with nexthop objects is not supported");
2216 kfree(fib_work);
2215 return notifier_from_errno(-EINVAL); 2217 return notifier_from_errno(-EINVAL);
2216 } 2218 }
2217 } 2219 }
diff --git a/drivers/net/ethernet/samsung/Kconfig b/drivers/net/ethernet/samsung/Kconfig
index 027938017579..e92a178a76df 100644
--- a/drivers/net/ethernet/samsung/Kconfig
+++ b/drivers/net/ethernet/samsung/Kconfig
@@ -11,7 +11,7 @@ config NET_VENDOR_SAMSUNG
11 say Y. 11 say Y.
12 12
13 Note that the answer to this question does not directly affect 13 Note that the answer to this question does not directly affect
14 the kernel: saying N will just case the configurator to skip all 14 the kernel: saying N will just cause the configurator to skip all
15 the questions about Samsung chipsets. If you say Y, you will be asked 15 the questions about Samsung chipsets. If you say Y, you will be asked
16 for your specific chipset/driver in the following questions. 16 for your specific chipset/driver in the following questions.
17 17
diff --git a/drivers/net/ethernet/seeq/sgiseeq.c b/drivers/net/ethernet/seeq/sgiseeq.c
index 7a5e6c5abb57..276c7cae7cee 100644
--- a/drivers/net/ethernet/seeq/sgiseeq.c
+++ b/drivers/net/ethernet/seeq/sgiseeq.c
@@ -794,15 +794,16 @@ static int sgiseeq_probe(struct platform_device *pdev)
794 printk(KERN_ERR "Sgiseeq: Cannot register net device, " 794 printk(KERN_ERR "Sgiseeq: Cannot register net device, "
795 "aborting.\n"); 795 "aborting.\n");
796 err = -ENODEV; 796 err = -ENODEV;
797 goto err_out_free_page; 797 goto err_out_free_attrs;
798 } 798 }
799 799
800 printk(KERN_INFO "%s: %s %pM\n", dev->name, sgiseeqstr, dev->dev_addr); 800 printk(KERN_INFO "%s: %s %pM\n", dev->name, sgiseeqstr, dev->dev_addr);
801 801
802 return 0; 802 return 0;
803 803
804err_out_free_page: 804err_out_free_attrs:
805 free_page((unsigned long) sp->srings); 805 dma_free_attrs(&pdev->dev, sizeof(*sp->srings), sp->srings,
806 sp->srings_dma, DMA_ATTR_NON_CONSISTENT);
806err_out_free_dev: 807err_out_free_dev:
807 free_netdev(dev); 808 free_netdev(dev);
808 809
diff --git a/drivers/net/ethernet/smsc/smc911x.c b/drivers/net/ethernet/smsc/smc911x.c
index bd14803545de..8d88e4083456 100644
--- a/drivers/net/ethernet/smsc/smc911x.c
+++ b/drivers/net/ethernet/smsc/smc911x.c
@@ -712,6 +712,7 @@ static void smc911x_phy_detect(struct net_device *dev)
712 /* Found an external PHY */ 712 /* Found an external PHY */
713 break; 713 break;
714 } 714 }
715 /* Else, fall through */
715 default: 716 default:
716 /* Internal media only */ 717 /* Internal media only */
717 SMC_GET_PHY_ID1(lp, 1, id1); 718 SMC_GET_PHY_ID1(lp, 1, id1);
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
index 4644b2aeeba1..e2e469c37a4d 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
@@ -1194,10 +1194,8 @@ static int phy_power_on(struct rk_priv_data *bsp_priv, bool enable)
1194 int ret; 1194 int ret;
1195 struct device *dev = &bsp_priv->pdev->dev; 1195 struct device *dev = &bsp_priv->pdev->dev;
1196 1196
1197 if (!ldo) { 1197 if (!ldo)
1198 dev_err(dev, "no regulator found\n"); 1198 return 0;
1199 return -1;
1200 }
1201 1199
1202 if (enable) { 1200 if (enable) {
1203 ret = regulator_enable(ldo); 1201 ret = regulator_enable(ldo);
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
index 01c2e2d83e76..fc9954e4a772 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
@@ -85,6 +85,8 @@ static void dwmac4_rx_queue_priority(struct mac_device_info *hw,
85 u32 value; 85 u32 value;
86 86
87 base_register = (queue < 4) ? GMAC_RXQ_CTRL2 : GMAC_RXQ_CTRL3; 87 base_register = (queue < 4) ? GMAC_RXQ_CTRL2 : GMAC_RXQ_CTRL3;
88 if (queue >= 4)
89 queue -= 4;
88 90
89 value = readl(ioaddr + base_register); 91 value = readl(ioaddr + base_register);
90 92
@@ -102,6 +104,8 @@ static void dwmac4_tx_queue_priority(struct mac_device_info *hw,
102 u32 value; 104 u32 value;
103 105
104 base_register = (queue < 4) ? GMAC_TXQ_PRTY_MAP0 : GMAC_TXQ_PRTY_MAP1; 106 base_register = (queue < 4) ? GMAC_TXQ_PRTY_MAP0 : GMAC_TXQ_PRTY_MAP1;
107 if (queue >= 4)
108 queue -= 4;
105 109
106 value = readl(ioaddr + base_register); 110 value = readl(ioaddr + base_register);
107 111
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h
index 7f86dffb264d..3174b701aa90 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h
+++ b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h
@@ -44,11 +44,13 @@
44#define XGMAC_CORE_INIT_RX 0 44#define XGMAC_CORE_INIT_RX 0
45#define XGMAC_PACKET_FILTER 0x00000008 45#define XGMAC_PACKET_FILTER 0x00000008
46#define XGMAC_FILTER_RA BIT(31) 46#define XGMAC_FILTER_RA BIT(31)
47#define XGMAC_FILTER_HPF BIT(10)
47#define XGMAC_FILTER_PCF BIT(7) 48#define XGMAC_FILTER_PCF BIT(7)
48#define XGMAC_FILTER_PM BIT(4) 49#define XGMAC_FILTER_PM BIT(4)
49#define XGMAC_FILTER_HMC BIT(2) 50#define XGMAC_FILTER_HMC BIT(2)
50#define XGMAC_FILTER_PR BIT(0) 51#define XGMAC_FILTER_PR BIT(0)
51#define XGMAC_HASH_TABLE(x) (0x00000010 + (x) * 4) 52#define XGMAC_HASH_TABLE(x) (0x00000010 + (x) * 4)
53#define XGMAC_MAX_HASH_TABLE 8
52#define XGMAC_RXQ_CTRL0 0x000000a0 54#define XGMAC_RXQ_CTRL0 0x000000a0
53#define XGMAC_RXQEN(x) GENMASK((x) * 2 + 1, (x) * 2) 55#define XGMAC_RXQEN(x) GENMASK((x) * 2 + 1, (x) * 2)
54#define XGMAC_RXQEN_SHIFT(x) ((x) * 2) 56#define XGMAC_RXQEN_SHIFT(x) ((x) * 2)
@@ -99,11 +101,12 @@
99#define XGMAC_MDIO_ADDR 0x00000200 101#define XGMAC_MDIO_ADDR 0x00000200
100#define XGMAC_MDIO_DATA 0x00000204 102#define XGMAC_MDIO_DATA 0x00000204
101#define XGMAC_MDIO_C22P 0x00000220 103#define XGMAC_MDIO_C22P 0x00000220
102#define XGMAC_ADDR0_HIGH 0x00000300 104#define XGMAC_ADDRx_HIGH(x) (0x00000300 + (x) * 0x8)
105#define XGMAC_ADDR_MAX 32
103#define XGMAC_AE BIT(31) 106#define XGMAC_AE BIT(31)
104#define XGMAC_DCS GENMASK(19, 16) 107#define XGMAC_DCS GENMASK(19, 16)
105#define XGMAC_DCS_SHIFT 16 108#define XGMAC_DCS_SHIFT 16
106#define XGMAC_ADDR0_LOW 0x00000304 109#define XGMAC_ADDRx_LOW(x) (0x00000304 + (x) * 0x8)
107#define XGMAC_ARP_ADDR 0x00000c10 110#define XGMAC_ARP_ADDR 0x00000c10
108#define XGMAC_TIMESTAMP_STATUS 0x00000d20 111#define XGMAC_TIMESTAMP_STATUS 0x00000d20
109#define XGMAC_TXTSC BIT(15) 112#define XGMAC_TXTSC BIT(15)
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c
index 0a32c96a7854..85c68b7ee8c6 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c
@@ -4,6 +4,8 @@
4 * stmmac XGMAC support. 4 * stmmac XGMAC support.
5 */ 5 */
6 6
7#include <linux/bitrev.h>
8#include <linux/crc32.h>
7#include "stmmac.h" 9#include "stmmac.h"
8#include "dwxgmac2.h" 10#include "dwxgmac2.h"
9 11
@@ -106,6 +108,8 @@ static void dwxgmac2_rx_queue_prio(struct mac_device_info *hw, u32 prio,
106 u32 value, reg; 108 u32 value, reg;
107 109
108 reg = (queue < 4) ? XGMAC_RXQ_CTRL2 : XGMAC_RXQ_CTRL3; 110 reg = (queue < 4) ? XGMAC_RXQ_CTRL2 : XGMAC_RXQ_CTRL3;
111 if (queue >= 4)
112 queue -= 4;
109 113
110 value = readl(ioaddr + reg); 114 value = readl(ioaddr + reg);
111 value &= ~XGMAC_PSRQ(queue); 115 value &= ~XGMAC_PSRQ(queue);
@@ -169,6 +173,8 @@ static void dwxgmac2_map_mtl_to_dma(struct mac_device_info *hw, u32 queue,
169 u32 value, reg; 173 u32 value, reg;
170 174
171 reg = (queue < 4) ? XGMAC_MTL_RXQ_DMA_MAP0 : XGMAC_MTL_RXQ_DMA_MAP1; 175 reg = (queue < 4) ? XGMAC_MTL_RXQ_DMA_MAP0 : XGMAC_MTL_RXQ_DMA_MAP1;
176 if (queue >= 4)
177 queue -= 4;
172 178
173 value = readl(ioaddr + reg); 179 value = readl(ioaddr + reg);
174 value &= ~XGMAC_QxMDMACH(queue); 180 value &= ~XGMAC_QxMDMACH(queue);
@@ -278,10 +284,10 @@ static void dwxgmac2_set_umac_addr(struct mac_device_info *hw,
278 u32 value; 284 u32 value;
279 285
280 value = (addr[5] << 8) | addr[4]; 286 value = (addr[5] << 8) | addr[4];
281 writel(value | XGMAC_AE, ioaddr + XGMAC_ADDR0_HIGH); 287 writel(value | XGMAC_AE, ioaddr + XGMAC_ADDRx_HIGH(reg_n));
282 288
283 value = (addr[3] << 24) | (addr[2] << 16) | (addr[1] << 8) | addr[0]; 289 value = (addr[3] << 24) | (addr[2] << 16) | (addr[1] << 8) | addr[0];
284 writel(value, ioaddr + XGMAC_ADDR0_LOW); 290 writel(value, ioaddr + XGMAC_ADDRx_LOW(reg_n));
285} 291}
286 292
287static void dwxgmac2_get_umac_addr(struct mac_device_info *hw, 293static void dwxgmac2_get_umac_addr(struct mac_device_info *hw,
@@ -291,8 +297,8 @@ static void dwxgmac2_get_umac_addr(struct mac_device_info *hw,
291 u32 hi_addr, lo_addr; 297 u32 hi_addr, lo_addr;
292 298
293 /* Read the MAC address from the hardware */ 299 /* Read the MAC address from the hardware */
294 hi_addr = readl(ioaddr + XGMAC_ADDR0_HIGH); 300 hi_addr = readl(ioaddr + XGMAC_ADDRx_HIGH(reg_n));
295 lo_addr = readl(ioaddr + XGMAC_ADDR0_LOW); 301 lo_addr = readl(ioaddr + XGMAC_ADDRx_LOW(reg_n));
296 302
297 /* Extract the MAC address from the high and low words */ 303 /* Extract the MAC address from the high and low words */
298 addr[0] = lo_addr & 0xff; 304 addr[0] = lo_addr & 0xff;
@@ -303,19 +309,82 @@ static void dwxgmac2_get_umac_addr(struct mac_device_info *hw,
303 addr[5] = (hi_addr >> 8) & 0xff; 309 addr[5] = (hi_addr >> 8) & 0xff;
304} 310}
305 311
312static void dwxgmac2_set_mchash(void __iomem *ioaddr, u32 *mcfilterbits,
313 int mcbitslog2)
314{
315 int numhashregs, regs;
316
317 switch (mcbitslog2) {
318 case 6:
319 numhashregs = 2;
320 break;
321 case 7:
322 numhashregs = 4;
323 break;
324 case 8:
325 numhashregs = 8;
326 break;
327 default:
328 return;
329 }
330
331 for (regs = 0; regs < numhashregs; regs++)
332 writel(mcfilterbits[regs], ioaddr + XGMAC_HASH_TABLE(regs));
333}
334
306static void dwxgmac2_set_filter(struct mac_device_info *hw, 335static void dwxgmac2_set_filter(struct mac_device_info *hw,
307 struct net_device *dev) 336 struct net_device *dev)
308{ 337{
309 void __iomem *ioaddr = (void __iomem *)dev->base_addr; 338 void __iomem *ioaddr = (void __iomem *)dev->base_addr;
310 u32 value = XGMAC_FILTER_RA; 339 u32 value = readl(ioaddr + XGMAC_PACKET_FILTER);
340 int mcbitslog2 = hw->mcast_bits_log2;
341 u32 mc_filter[8];
342 int i;
343
344 value &= ~(XGMAC_FILTER_PR | XGMAC_FILTER_HMC | XGMAC_FILTER_PM);
345 value |= XGMAC_FILTER_HPF;
346
347 memset(mc_filter, 0, sizeof(mc_filter));
311 348
312 if (dev->flags & IFF_PROMISC) { 349 if (dev->flags & IFF_PROMISC) {
313 value |= XGMAC_FILTER_PR | XGMAC_FILTER_PCF; 350 value |= XGMAC_FILTER_PR;
351 value |= XGMAC_FILTER_PCF;
314 } else if ((dev->flags & IFF_ALLMULTI) || 352 } else if ((dev->flags & IFF_ALLMULTI) ||
315 (netdev_mc_count(dev) > HASH_TABLE_SIZE)) { 353 (netdev_mc_count(dev) > hw->multicast_filter_bins)) {
316 value |= XGMAC_FILTER_PM; 354 value |= XGMAC_FILTER_PM;
317 writel(~0x0, ioaddr + XGMAC_HASH_TABLE(0)); 355
318 writel(~0x0, ioaddr + XGMAC_HASH_TABLE(1)); 356 for (i = 0; i < XGMAC_MAX_HASH_TABLE; i++)
357 writel(~0x0, ioaddr + XGMAC_HASH_TABLE(i));
358 } else if (!netdev_mc_empty(dev)) {
359 struct netdev_hw_addr *ha;
360
361 value |= XGMAC_FILTER_HMC;
362
363 netdev_for_each_mc_addr(ha, dev) {
364 int nr = (bitrev32(~crc32_le(~0, ha->addr, 6)) >>
365 (32 - mcbitslog2));
366 mc_filter[nr >> 5] |= (1 << (nr & 0x1F));
367 }
368 }
369
370 dwxgmac2_set_mchash(ioaddr, mc_filter, mcbitslog2);
371
372 /* Handle multiple unicast addresses */
373 if (netdev_uc_count(dev) > XGMAC_ADDR_MAX) {
374 value |= XGMAC_FILTER_PR;
375 } else {
376 struct netdev_hw_addr *ha;
377 int reg = 1;
378
379 netdev_for_each_uc_addr(ha, dev) {
380 dwxgmac2_set_umac_addr(hw, ha->addr, reg);
381 reg++;
382 }
383
384 for ( ; reg < XGMAC_ADDR_MAX; reg++) {
385 writel(0, ioaddr + XGMAC_ADDRx_HIGH(reg));
386 writel(0, ioaddr + XGMAC_ADDRx_LOW(reg));
387 }
319 } 388 }
320 389
321 writel(value, ioaddr + XGMAC_PACKET_FILTER); 390 writel(value, ioaddr + XGMAC_PACKET_FILTER);
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index c7c9e5f162e6..fd54c7c87485 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -814,20 +814,15 @@ static void stmmac_validate(struct phylink_config *config,
814 phylink_set(mac_supported, 10baseT_Full); 814 phylink_set(mac_supported, 10baseT_Full);
815 phylink_set(mac_supported, 100baseT_Half); 815 phylink_set(mac_supported, 100baseT_Half);
816 phylink_set(mac_supported, 100baseT_Full); 816 phylink_set(mac_supported, 100baseT_Full);
817 phylink_set(mac_supported, 1000baseT_Half);
818 phylink_set(mac_supported, 1000baseT_Full);
819 phylink_set(mac_supported, 1000baseKX_Full);
817 820
818 phylink_set(mac_supported, Autoneg); 821 phylink_set(mac_supported, Autoneg);
819 phylink_set(mac_supported, Pause); 822 phylink_set(mac_supported, Pause);
820 phylink_set(mac_supported, Asym_Pause); 823 phylink_set(mac_supported, Asym_Pause);
821 phylink_set_port_modes(mac_supported); 824 phylink_set_port_modes(mac_supported);
822 825
823 if (priv->plat->has_gmac ||
824 priv->plat->has_gmac4 ||
825 priv->plat->has_xgmac) {
826 phylink_set(mac_supported, 1000baseT_Half);
827 phylink_set(mac_supported, 1000baseT_Full);
828 phylink_set(mac_supported, 1000baseKX_Full);
829 }
830
831 /* Cut down 1G if asked to */ 826 /* Cut down 1G if asked to */
832 if ((max_speed > 0) && (max_speed < 1000)) { 827 if ((max_speed > 0) && (max_speed < 1000)) {
833 phylink_set(mask, 1000baseT_Full); 828 phylink_set(mask, 1000baseT_Full);
@@ -1295,6 +1290,8 @@ static int init_dma_rx_desc_rings(struct net_device *dev, gfp_t flags)
1295 "(%s) dma_rx_phy=0x%08x\n", __func__, 1290 "(%s) dma_rx_phy=0x%08x\n", __func__,
1296 (u32)rx_q->dma_rx_phy); 1291 (u32)rx_q->dma_rx_phy);
1297 1292
1293 stmmac_clear_rx_descriptors(priv, queue);
1294
1298 for (i = 0; i < DMA_RX_SIZE; i++) { 1295 for (i = 0; i < DMA_RX_SIZE; i++) {
1299 struct dma_desc *p; 1296 struct dma_desc *p;
1300 1297
@@ -1312,8 +1309,6 @@ static int init_dma_rx_desc_rings(struct net_device *dev, gfp_t flags)
1312 rx_q->cur_rx = 0; 1309 rx_q->cur_rx = 0;
1313 rx_q->dirty_rx = (unsigned int)(i - DMA_RX_SIZE); 1310 rx_q->dirty_rx = (unsigned int)(i - DMA_RX_SIZE);
1314 1311
1315 stmmac_clear_rx_descriptors(priv, queue);
1316
1317 /* Setup the chained descriptor addresses */ 1312 /* Setup the chained descriptor addresses */
1318 if (priv->mode == STMMAC_CHAIN_MODE) { 1313 if (priv->mode == STMMAC_CHAIN_MODE) {
1319 if (priv->extend_desc) 1314 if (priv->extend_desc)
@@ -1555,9 +1550,8 @@ static int alloc_dma_rx_desc_resources(struct stmmac_priv *priv)
1555 goto err_dma; 1550 goto err_dma;
1556 } 1551 }
1557 1552
1558 rx_q->buf_pool = kmalloc_array(DMA_RX_SIZE, 1553 rx_q->buf_pool = kcalloc(DMA_RX_SIZE, sizeof(*rx_q->buf_pool),
1559 sizeof(*rx_q->buf_pool), 1554 GFP_KERNEL);
1560 GFP_KERNEL);
1561 if (!rx_q->buf_pool) 1555 if (!rx_q->buf_pool)
1562 goto err_dma; 1556 goto err_dma;
1563 1557
@@ -1608,15 +1602,15 @@ static int alloc_dma_tx_desc_resources(struct stmmac_priv *priv)
1608 tx_q->queue_index = queue; 1602 tx_q->queue_index = queue;
1609 tx_q->priv_data = priv; 1603 tx_q->priv_data = priv;
1610 1604
1611 tx_q->tx_skbuff_dma = kmalloc_array(DMA_TX_SIZE, 1605 tx_q->tx_skbuff_dma = kcalloc(DMA_TX_SIZE,
1612 sizeof(*tx_q->tx_skbuff_dma), 1606 sizeof(*tx_q->tx_skbuff_dma),
1613 GFP_KERNEL); 1607 GFP_KERNEL);
1614 if (!tx_q->tx_skbuff_dma) 1608 if (!tx_q->tx_skbuff_dma)
1615 goto err_dma; 1609 goto err_dma;
1616 1610
1617 tx_q->tx_skbuff = kmalloc_array(DMA_TX_SIZE, 1611 tx_q->tx_skbuff = kcalloc(DMA_TX_SIZE,
1618 sizeof(struct sk_buff *), 1612 sizeof(struct sk_buff *),
1619 GFP_KERNEL); 1613 GFP_KERNEL);
1620 if (!tx_q->tx_skbuff) 1614 if (!tx_q->tx_skbuff)
1621 goto err_dma; 1615 goto err_dma;
1622 1616
@@ -3277,9 +3271,11 @@ static inline int stmmac_rx_threshold_count(struct stmmac_rx_queue *rx_q)
3277static inline void stmmac_rx_refill(struct stmmac_priv *priv, u32 queue) 3271static inline void stmmac_rx_refill(struct stmmac_priv *priv, u32 queue)
3278{ 3272{
3279 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue]; 3273 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
3280 int dirty = stmmac_rx_dirty(priv, queue); 3274 int len, dirty = stmmac_rx_dirty(priv, queue);
3281 unsigned int entry = rx_q->dirty_rx; 3275 unsigned int entry = rx_q->dirty_rx;
3282 3276
3277 len = DIV_ROUND_UP(priv->dma_buf_sz, PAGE_SIZE) * PAGE_SIZE;
3278
3283 while (dirty-- > 0) { 3279 while (dirty-- > 0) {
3284 struct stmmac_rx_buffer *buf = &rx_q->buf_pool[entry]; 3280 struct stmmac_rx_buffer *buf = &rx_q->buf_pool[entry];
3285 struct dma_desc *p; 3281 struct dma_desc *p;
@@ -3297,6 +3293,13 @@ static inline void stmmac_rx_refill(struct stmmac_priv *priv, u32 queue)
3297 } 3293 }
3298 3294
3299 buf->addr = page_pool_get_dma_addr(buf->page); 3295 buf->addr = page_pool_get_dma_addr(buf->page);
3296
3297 /* Sync whole allocation to device. This will invalidate old
3298 * data.
3299 */
3300 dma_sync_single_for_device(priv->device, buf->addr, len,
3301 DMA_FROM_DEVICE);
3302
3300 stmmac_set_desc_addr(priv, p, buf->addr); 3303 stmmac_set_desc_addr(priv, p, buf->addr);
3301 stmmac_refill_desc3(priv, rx_q, p); 3304 stmmac_refill_desc3(priv, rx_q, p);
3302 3305
@@ -3431,8 +3434,6 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
3431 skb_copy_to_linear_data(skb, page_address(buf->page), 3434 skb_copy_to_linear_data(skb, page_address(buf->page),
3432 frame_len); 3435 frame_len);
3433 skb_put(skb, frame_len); 3436 skb_put(skb, frame_len);
3434 dma_sync_single_for_device(priv->device, buf->addr,
3435 frame_len, DMA_FROM_DEVICE);
3436 3437
3437 if (netif_msg_pktdata(priv)) { 3438 if (netif_msg_pktdata(priv)) {
3438 netdev_dbg(priv->dev, "frame received (%dbytes)", 3439 netdev_dbg(priv->dev, "frame received (%dbytes)",
@@ -4319,8 +4320,9 @@ int stmmac_dvr_probe(struct device *device,
4319 NAPI_POLL_WEIGHT); 4320 NAPI_POLL_WEIGHT);
4320 } 4321 }
4321 if (queue < priv->plat->tx_queues_to_use) { 4322 if (queue < priv->plat->tx_queues_to_use) {
4322 netif_napi_add(ndev, &ch->tx_napi, stmmac_napi_poll_tx, 4323 netif_tx_napi_add(ndev, &ch->tx_napi,
4323 NAPI_POLL_WEIGHT); 4324 stmmac_napi_poll_tx,
4325 NAPI_POLL_WEIGHT);
4324 } 4326 }
4325 } 4327 }
4326 4328
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
index 73fc2524372e..154daf4d1072 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
@@ -370,6 +370,13 @@ stmmac_probe_config_dt(struct platform_device *pdev, const char **mac)
370 return ERR_PTR(-ENOMEM); 370 return ERR_PTR(-ENOMEM);
371 371
372 *mac = of_get_mac_address(np); 372 *mac = of_get_mac_address(np);
373 if (IS_ERR(*mac)) {
374 if (PTR_ERR(*mac) == -EPROBE_DEFER)
375 return ERR_CAST(*mac);
376
377 *mac = NULL;
378 }
379
373 plat->interface = of_get_phy_mode(np); 380 plat->interface = of_get_phy_mode(np);
374 381
375 /* Some wrapper drivers still rely on phy_node. Let's save it while 382 /* Some wrapper drivers still rely on phy_node. Let's save it while
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c
index 58ea18af9813..6c305b6ecad0 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c
@@ -37,7 +37,7 @@ static struct stmmac_tc_entry *tc_find_entry(struct stmmac_priv *priv,
37 entry = &priv->tc_entries[i]; 37 entry = &priv->tc_entries[i];
38 if (!entry->in_use && !first && free) 38 if (!entry->in_use && !first && free)
39 first = entry; 39 first = entry;
40 if (entry->handle == loc && !free) 40 if ((entry->handle == loc) && !free && !entry->is_frag)
41 dup = entry; 41 dup = entry;
42 } 42 }
43 43
@@ -94,7 +94,7 @@ static int tc_fill_entry(struct stmmac_priv *priv,
94 struct stmmac_tc_entry *entry, *frag = NULL; 94 struct stmmac_tc_entry *entry, *frag = NULL;
95 struct tc_u32_sel *sel = cls->knode.sel; 95 struct tc_u32_sel *sel = cls->knode.sel;
96 u32 off, data, mask, real_off, rem; 96 u32 off, data, mask, real_off, rem;
97 u32 prio = cls->common.prio; 97 u32 prio = cls->common.prio << 16;
98 int ret; 98 int ret;
99 99
100 /* Only 1 match per entry */ 100 /* Only 1 match per entry */
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
index 32a89744972d..a46b8b2e44e1 100644
--- a/drivers/net/ethernet/ti/cpsw.c
+++ b/drivers/net/ethernet/ti/cpsw.c
@@ -2775,6 +2775,7 @@ static int cpsw_probe(struct platform_device *pdev)
2775 if (!cpsw) 2775 if (!cpsw)
2776 return -ENOMEM; 2776 return -ENOMEM;
2777 2777
2778 platform_set_drvdata(pdev, cpsw);
2778 cpsw->dev = dev; 2779 cpsw->dev = dev;
2779 2780
2780 mode = devm_gpiod_get_array_optional(dev, "mode", GPIOD_OUT_LOW); 2781 mode = devm_gpiod_get_array_optional(dev, "mode", GPIOD_OUT_LOW);
@@ -2879,7 +2880,6 @@ static int cpsw_probe(struct platform_device *pdev)
2879 goto clean_cpts; 2880 goto clean_cpts;
2880 } 2881 }
2881 2882
2882 platform_set_drvdata(pdev, cpsw);
2883 priv = netdev_priv(ndev); 2883 priv = netdev_priv(ndev);
2884 priv->cpsw = cpsw; 2884 priv->cpsw = cpsw;
2885 priv->ndev = ndev; 2885 priv->ndev = ndev;
diff --git a/drivers/net/ethernet/toshiba/spider_net.c b/drivers/net/ethernet/toshiba/spider_net.c
index 5b196ebfed49..0f346761a2b2 100644
--- a/drivers/net/ethernet/toshiba/spider_net.c
+++ b/drivers/net/ethernet/toshiba/spider_net.c
@@ -788,6 +788,7 @@ spider_net_release_tx_chain(struct spider_net_card *card, int brutal)
788 /* fallthrough, if we release the descriptors 788 /* fallthrough, if we release the descriptors
789 * brutally (then we don't care about 789 * brutally (then we don't care about
790 * SPIDER_NET_DESCR_CARDOWNED) */ 790 * SPIDER_NET_DESCR_CARDOWNED) */
791 /* Fall through */
791 792
792 case SPIDER_NET_DESCR_RESPONSE_ERROR: 793 case SPIDER_NET_DESCR_RESPONSE_ERROR:
793 case SPIDER_NET_DESCR_PROTECTION_ERROR: 794 case SPIDER_NET_DESCR_PROTECTION_ERROR:
diff --git a/drivers/net/ethernet/toshiba/tc35815.c b/drivers/net/ethernet/toshiba/tc35815.c
index 8479a440527b..12466a72cefc 100644
--- a/drivers/net/ethernet/toshiba/tc35815.c
+++ b/drivers/net/ethernet/toshiba/tc35815.c
@@ -1504,7 +1504,7 @@ tc35815_rx(struct net_device *dev, int limit)
1504 pci_unmap_single(lp->pci_dev, 1504 pci_unmap_single(lp->pci_dev,
1505 lp->rx_skbs[cur_bd].skb_dma, 1505 lp->rx_skbs[cur_bd].skb_dma,
1506 RX_BUF_SIZE, PCI_DMA_FROMDEVICE); 1506 RX_BUF_SIZE, PCI_DMA_FROMDEVICE);
1507 if (!HAVE_DMA_RXALIGN(lp) && NET_IP_ALIGN) 1507 if (!HAVE_DMA_RXALIGN(lp) && NET_IP_ALIGN != 0)
1508 memmove(skb->data, skb->data - NET_IP_ALIGN, 1508 memmove(skb->data, skb->data - NET_IP_ALIGN,
1509 pkt_len); 1509 pkt_len);
1510 data = skb_put(skb, pkt_len); 1510 data = skb_put(skb, pkt_len);
diff --git a/drivers/net/ethernet/tundra/tsi108_eth.c b/drivers/net/ethernet/tundra/tsi108_eth.c
index 78a7de3fb622..c62f474b6d08 100644
--- a/drivers/net/ethernet/tundra/tsi108_eth.c
+++ b/drivers/net/ethernet/tundra/tsi108_eth.c
@@ -371,9 +371,10 @@ tsi108_stat_carry_one(int carry, int carry_bit, int carry_shift,
371static void tsi108_stat_carry(struct net_device *dev) 371static void tsi108_stat_carry(struct net_device *dev)
372{ 372{
373 struct tsi108_prv_data *data = netdev_priv(dev); 373 struct tsi108_prv_data *data = netdev_priv(dev);
374 unsigned long flags;
374 u32 carry1, carry2; 375 u32 carry1, carry2;
375 376
376 spin_lock_irq(&data->misclock); 377 spin_lock_irqsave(&data->misclock, flags);
377 378
378 carry1 = TSI_READ(TSI108_STAT_CARRY1); 379 carry1 = TSI_READ(TSI108_STAT_CARRY1);
379 carry2 = TSI_READ(TSI108_STAT_CARRY2); 380 carry2 = TSI_READ(TSI108_STAT_CARRY2);
@@ -441,7 +442,7 @@ static void tsi108_stat_carry(struct net_device *dev)
441 TSI108_STAT_TXPAUSEDROP_CARRY, 442 TSI108_STAT_TXPAUSEDROP_CARRY,
442 &data->tx_pause_drop); 443 &data->tx_pause_drop);
443 444
444 spin_unlock_irq(&data->misclock); 445 spin_unlock_irqrestore(&data->misclock, flags);
445} 446}
446 447
447/* Read a stat counter atomically with respect to carries. 448/* Read a stat counter atomically with respect to carries.
diff --git a/drivers/net/ethernet/xscale/Kconfig b/drivers/net/ethernet/xscale/Kconfig
index 2f354ba029a6..cd0a8f46e7c6 100644
--- a/drivers/net/ethernet/xscale/Kconfig
+++ b/drivers/net/ethernet/xscale/Kconfig
@@ -13,7 +13,7 @@ config NET_VENDOR_XSCALE
13 13
14 Note that the answer to this question does not directly affect the 14 Note that the answer to this question does not directly affect the
15 kernel: saying N will just cause the configurator to skip all 15 kernel: saying N will just cause the configurator to skip all
16 the questions about XSacle IXP devices. If you say Y, you will be 16 the questions about XScale IXP devices. If you say Y, you will be
17 asked for your specific card in the following questions. 17 asked for your specific card in the following questions.
18 18
19if NET_VENDOR_XSCALE 19if NET_VENDOR_XSCALE
diff --git a/drivers/net/hamradio/baycom_epp.c b/drivers/net/hamradio/baycom_epp.c
index daab2c07d891..9303aeb2595f 100644
--- a/drivers/net/hamradio/baycom_epp.c
+++ b/drivers/net/hamradio/baycom_epp.c
@@ -500,8 +500,9 @@ static int transmit(struct baycom_state *bc, int cnt, unsigned char stat)
500 } 500 }
501 break; 501 break;
502 } 502 }
503 /* fall through */
503 504
504 default: /* fall through */ 505 default:
505 if (bc->hdlctx.calibrate <= 0) 506 if (bc->hdlctx.calibrate <= 0)
506 return 0; 507 return 0;
507 i = min_t(int, cnt, bc->hdlctx.calibrate); 508 i = min_t(int, cnt, bc->hdlctx.calibrate);
diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
index afdcc5664ea6..e8fce6d715ef 100644
--- a/drivers/net/hyperv/netvsc_drv.c
+++ b/drivers/net/hyperv/netvsc_drv.c
@@ -836,7 +836,6 @@ int netvsc_recv_callback(struct net_device *net,
836 836
837 if (unlikely(!skb)) { 837 if (unlikely(!skb)) {
838 ++net_device_ctx->eth_stats.rx_no_memory; 838 ++net_device_ctx->eth_stats.rx_no_memory;
839 rcu_read_unlock();
840 return NVSP_STAT_FAIL; 839 return NVSP_STAT_FAIL;
841 } 840 }
842 841
@@ -1240,12 +1239,15 @@ static void netvsc_get_stats64(struct net_device *net,
1240 struct rtnl_link_stats64 *t) 1239 struct rtnl_link_stats64 *t)
1241{ 1240{
1242 struct net_device_context *ndev_ctx = netdev_priv(net); 1241 struct net_device_context *ndev_ctx = netdev_priv(net);
1243 struct netvsc_device *nvdev = rcu_dereference_rtnl(ndev_ctx->nvdev); 1242 struct netvsc_device *nvdev;
1244 struct netvsc_vf_pcpu_stats vf_tot; 1243 struct netvsc_vf_pcpu_stats vf_tot;
1245 int i; 1244 int i;
1246 1245
1246 rcu_read_lock();
1247
1248 nvdev = rcu_dereference(ndev_ctx->nvdev);
1247 if (!nvdev) 1249 if (!nvdev)
1248 return; 1250 goto out;
1249 1251
1250 netdev_stats_to_stats64(t, &net->stats); 1252 netdev_stats_to_stats64(t, &net->stats);
1251 1253
@@ -1284,6 +1286,8 @@ static void netvsc_get_stats64(struct net_device *net,
1284 t->rx_packets += packets; 1286 t->rx_packets += packets;
1285 t->multicast += multicast; 1287 t->multicast += multicast;
1286 } 1288 }
1289out:
1290 rcu_read_unlock();
1287} 1291}
1288 1292
1289static int netvsc_set_mac_addr(struct net_device *ndev, void *p) 1293static int netvsc_set_mac_addr(struct net_device *ndev, void *p)
diff --git a/drivers/net/ieee802154/mac802154_hwsim.c b/drivers/net/ieee802154/mac802154_hwsim.c
index b41696e16bdc..c20e7ef18bc9 100644
--- a/drivers/net/ieee802154/mac802154_hwsim.c
+++ b/drivers/net/ieee802154/mac802154_hwsim.c
@@ -802,7 +802,7 @@ static int hwsim_add_one(struct genl_info *info, struct device *dev,
802 err = hwsim_subscribe_all_others(phy); 802 err = hwsim_subscribe_all_others(phy);
803 if (err < 0) { 803 if (err < 0) {
804 mutex_unlock(&hwsim_phys_lock); 804 mutex_unlock(&hwsim_phys_lock);
805 goto err_reg; 805 goto err_subscribe;
806 } 806 }
807 } 807 }
808 list_add_tail(&phy->list, &hwsim_phys); 808 list_add_tail(&phy->list, &hwsim_phys);
@@ -812,6 +812,8 @@ static int hwsim_add_one(struct genl_info *info, struct device *dev,
812 812
813 return idx; 813 return idx;
814 814
815err_subscribe:
816 ieee802154_unregister_hw(phy->hw);
815err_reg: 817err_reg:
816 kfree(pib); 818 kfree(pib);
817err_pib: 819err_pib:
@@ -901,9 +903,9 @@ static __init int hwsim_init_module(void)
901 return 0; 903 return 0;
902 904
903platform_drv: 905platform_drv:
904 genl_unregister_family(&hwsim_genl_family);
905platform_dev:
906 platform_device_unregister(mac802154hwsim_dev); 906 platform_device_unregister(mac802154hwsim_dev);
907platform_dev:
908 genl_unregister_family(&hwsim_genl_family);
907 return rc; 909 return rc;
908} 910}
909 911
diff --git a/drivers/net/netdevsim/dev.c b/drivers/net/netdevsim/dev.c
index c5c417a3c0ce..bcc40a236624 100644
--- a/drivers/net/netdevsim/dev.c
+++ b/drivers/net/netdevsim/dev.c
@@ -73,46 +73,47 @@ static void nsim_dev_port_debugfs_exit(struct nsim_dev_port *nsim_dev_port)
73 debugfs_remove_recursive(nsim_dev_port->ddir); 73 debugfs_remove_recursive(nsim_dev_port->ddir);
74} 74}
75 75
76static struct net *nsim_devlink_net(struct devlink *devlink)
77{
78 return &init_net;
79}
80
76static u64 nsim_dev_ipv4_fib_resource_occ_get(void *priv) 81static u64 nsim_dev_ipv4_fib_resource_occ_get(void *priv)
77{ 82{
78 struct nsim_dev *nsim_dev = priv; 83 struct net *net = priv;
79 84
80 return nsim_fib_get_val(nsim_dev->fib_data, 85 return nsim_fib_get_val(net, NSIM_RESOURCE_IPV4_FIB, false);
81 NSIM_RESOURCE_IPV4_FIB, false);
82} 86}
83 87
84static u64 nsim_dev_ipv4_fib_rules_res_occ_get(void *priv) 88static u64 nsim_dev_ipv4_fib_rules_res_occ_get(void *priv)
85{ 89{
86 struct nsim_dev *nsim_dev = priv; 90 struct net *net = priv;
87 91
88 return nsim_fib_get_val(nsim_dev->fib_data, 92 return nsim_fib_get_val(net, NSIM_RESOURCE_IPV4_FIB_RULES, false);
89 NSIM_RESOURCE_IPV4_FIB_RULES, false);
90} 93}
91 94
92static u64 nsim_dev_ipv6_fib_resource_occ_get(void *priv) 95static u64 nsim_dev_ipv6_fib_resource_occ_get(void *priv)
93{ 96{
94 struct nsim_dev *nsim_dev = priv; 97 struct net *net = priv;
95 98
96 return nsim_fib_get_val(nsim_dev->fib_data, 99 return nsim_fib_get_val(net, NSIM_RESOURCE_IPV6_FIB, false);
97 NSIM_RESOURCE_IPV6_FIB, false);
98} 100}
99 101
100static u64 nsim_dev_ipv6_fib_rules_res_occ_get(void *priv) 102static u64 nsim_dev_ipv6_fib_rules_res_occ_get(void *priv)
101{ 103{
102 struct nsim_dev *nsim_dev = priv; 104 struct net *net = priv;
103 105
104 return nsim_fib_get_val(nsim_dev->fib_data, 106 return nsim_fib_get_val(net, NSIM_RESOURCE_IPV6_FIB_RULES, false);
105 NSIM_RESOURCE_IPV6_FIB_RULES, false);
106} 107}
107 108
108static int nsim_dev_resources_register(struct devlink *devlink) 109static int nsim_dev_resources_register(struct devlink *devlink)
109{ 110{
110 struct nsim_dev *nsim_dev = devlink_priv(devlink);
111 struct devlink_resource_size_params params = { 111 struct devlink_resource_size_params params = {
112 .size_max = (u64)-1, 112 .size_max = (u64)-1,
113 .size_granularity = 1, 113 .size_granularity = 1,
114 .unit = DEVLINK_RESOURCE_UNIT_ENTRY 114 .unit = DEVLINK_RESOURCE_UNIT_ENTRY
115 }; 115 };
116 struct net *net = nsim_devlink_net(devlink);
116 int err; 117 int err;
117 u64 n; 118 u64 n;
118 119
@@ -126,8 +127,7 @@ static int nsim_dev_resources_register(struct devlink *devlink)
126 goto out; 127 goto out;
127 } 128 }
128 129
129 n = nsim_fib_get_val(nsim_dev->fib_data, 130 n = nsim_fib_get_val(net, NSIM_RESOURCE_IPV4_FIB, true);
130 NSIM_RESOURCE_IPV4_FIB, true);
131 err = devlink_resource_register(devlink, "fib", n, 131 err = devlink_resource_register(devlink, "fib", n,
132 NSIM_RESOURCE_IPV4_FIB, 132 NSIM_RESOURCE_IPV4_FIB,
133 NSIM_RESOURCE_IPV4, &params); 133 NSIM_RESOURCE_IPV4, &params);
@@ -136,8 +136,7 @@ static int nsim_dev_resources_register(struct devlink *devlink)
136 return err; 136 return err;
137 } 137 }
138 138
139 n = nsim_fib_get_val(nsim_dev->fib_data, 139 n = nsim_fib_get_val(net, NSIM_RESOURCE_IPV4_FIB_RULES, true);
140 NSIM_RESOURCE_IPV4_FIB_RULES, true);
141 err = devlink_resource_register(devlink, "fib-rules", n, 140 err = devlink_resource_register(devlink, "fib-rules", n,
142 NSIM_RESOURCE_IPV4_FIB_RULES, 141 NSIM_RESOURCE_IPV4_FIB_RULES,
143 NSIM_RESOURCE_IPV4, &params); 142 NSIM_RESOURCE_IPV4, &params);
@@ -156,8 +155,7 @@ static int nsim_dev_resources_register(struct devlink *devlink)
156 goto out; 155 goto out;
157 } 156 }
158 157
159 n = nsim_fib_get_val(nsim_dev->fib_data, 158 n = nsim_fib_get_val(net, NSIM_RESOURCE_IPV6_FIB, true);
160 NSIM_RESOURCE_IPV6_FIB, true);
161 err = devlink_resource_register(devlink, "fib", n, 159 err = devlink_resource_register(devlink, "fib", n,
162 NSIM_RESOURCE_IPV6_FIB, 160 NSIM_RESOURCE_IPV6_FIB,
163 NSIM_RESOURCE_IPV6, &params); 161 NSIM_RESOURCE_IPV6, &params);
@@ -166,8 +164,7 @@ static int nsim_dev_resources_register(struct devlink *devlink)
166 return err; 164 return err;
167 } 165 }
168 166
169 n = nsim_fib_get_val(nsim_dev->fib_data, 167 n = nsim_fib_get_val(net, NSIM_RESOURCE_IPV6_FIB_RULES, true);
170 NSIM_RESOURCE_IPV6_FIB_RULES, true);
171 err = devlink_resource_register(devlink, "fib-rules", n, 168 err = devlink_resource_register(devlink, "fib-rules", n,
172 NSIM_RESOURCE_IPV6_FIB_RULES, 169 NSIM_RESOURCE_IPV6_FIB_RULES,
173 NSIM_RESOURCE_IPV6, &params); 170 NSIM_RESOURCE_IPV6, &params);
@@ -179,19 +176,19 @@ static int nsim_dev_resources_register(struct devlink *devlink)
179 devlink_resource_occ_get_register(devlink, 176 devlink_resource_occ_get_register(devlink,
180 NSIM_RESOURCE_IPV4_FIB, 177 NSIM_RESOURCE_IPV4_FIB,
181 nsim_dev_ipv4_fib_resource_occ_get, 178 nsim_dev_ipv4_fib_resource_occ_get,
182 nsim_dev); 179 net);
183 devlink_resource_occ_get_register(devlink, 180 devlink_resource_occ_get_register(devlink,
184 NSIM_RESOURCE_IPV4_FIB_RULES, 181 NSIM_RESOURCE_IPV4_FIB_RULES,
185 nsim_dev_ipv4_fib_rules_res_occ_get, 182 nsim_dev_ipv4_fib_rules_res_occ_get,
186 nsim_dev); 183 net);
187 devlink_resource_occ_get_register(devlink, 184 devlink_resource_occ_get_register(devlink,
188 NSIM_RESOURCE_IPV6_FIB, 185 NSIM_RESOURCE_IPV6_FIB,
189 nsim_dev_ipv6_fib_resource_occ_get, 186 nsim_dev_ipv6_fib_resource_occ_get,
190 nsim_dev); 187 net);
191 devlink_resource_occ_get_register(devlink, 188 devlink_resource_occ_get_register(devlink,
192 NSIM_RESOURCE_IPV6_FIB_RULES, 189 NSIM_RESOURCE_IPV6_FIB_RULES,
193 nsim_dev_ipv6_fib_rules_res_occ_get, 190 nsim_dev_ipv6_fib_rules_res_occ_get,
194 nsim_dev); 191 net);
195out: 192out:
196 return err; 193 return err;
197} 194}
@@ -199,11 +196,11 @@ out:
199static int nsim_dev_reload(struct devlink *devlink, 196static int nsim_dev_reload(struct devlink *devlink,
200 struct netlink_ext_ack *extack) 197 struct netlink_ext_ack *extack)
201{ 198{
202 struct nsim_dev *nsim_dev = devlink_priv(devlink);
203 enum nsim_resource_id res_ids[] = { 199 enum nsim_resource_id res_ids[] = {
204 NSIM_RESOURCE_IPV4_FIB, NSIM_RESOURCE_IPV4_FIB_RULES, 200 NSIM_RESOURCE_IPV4_FIB, NSIM_RESOURCE_IPV4_FIB_RULES,
205 NSIM_RESOURCE_IPV6_FIB, NSIM_RESOURCE_IPV6_FIB_RULES 201 NSIM_RESOURCE_IPV6_FIB, NSIM_RESOURCE_IPV6_FIB_RULES
206 }; 202 };
203 struct net *net = nsim_devlink_net(devlink);
207 int i; 204 int i;
208 205
209 for (i = 0; i < ARRAY_SIZE(res_ids); ++i) { 206 for (i = 0; i < ARRAY_SIZE(res_ids); ++i) {
@@ -212,8 +209,7 @@ static int nsim_dev_reload(struct devlink *devlink,
212 209
213 err = devlink_resource_size_get(devlink, res_ids[i], &val); 210 err = devlink_resource_size_get(devlink, res_ids[i], &val);
214 if (!err) { 211 if (!err) {
215 err = nsim_fib_set_max(nsim_dev->fib_data, 212 err = nsim_fib_set_max(net, res_ids[i], val, extack);
216 res_ids[i], val, extack);
217 if (err) 213 if (err)
218 return err; 214 return err;
219 } 215 }
@@ -285,15 +281,9 @@ nsim_dev_create(struct nsim_bus_dev *nsim_bus_dev, unsigned int port_count)
285 mutex_init(&nsim_dev->port_list_lock); 281 mutex_init(&nsim_dev->port_list_lock);
286 nsim_dev->fw_update_status = true; 282 nsim_dev->fw_update_status = true;
287 283
288 nsim_dev->fib_data = nsim_fib_create();
289 if (IS_ERR(nsim_dev->fib_data)) {
290 err = PTR_ERR(nsim_dev->fib_data);
291 goto err_devlink_free;
292 }
293
294 err = nsim_dev_resources_register(devlink); 284 err = nsim_dev_resources_register(devlink);
295 if (err) 285 if (err)
296 goto err_fib_destroy; 286 goto err_devlink_free;
297 287
298 err = devlink_register(devlink, &nsim_bus_dev->dev); 288 err = devlink_register(devlink, &nsim_bus_dev->dev);
299 if (err) 289 if (err)
@@ -315,8 +305,6 @@ err_dl_unregister:
315 devlink_unregister(devlink); 305 devlink_unregister(devlink);
316err_resources_unregister: 306err_resources_unregister:
317 devlink_resources_unregister(devlink, NULL); 307 devlink_resources_unregister(devlink, NULL);
318err_fib_destroy:
319 nsim_fib_destroy(nsim_dev->fib_data);
320err_devlink_free: 308err_devlink_free:
321 devlink_free(devlink); 309 devlink_free(devlink);
322 return ERR_PTR(err); 310 return ERR_PTR(err);
@@ -330,7 +318,6 @@ static void nsim_dev_destroy(struct nsim_dev *nsim_dev)
330 nsim_dev_debugfs_exit(nsim_dev); 318 nsim_dev_debugfs_exit(nsim_dev);
331 devlink_unregister(devlink); 319 devlink_unregister(devlink);
332 devlink_resources_unregister(devlink, NULL); 320 devlink_resources_unregister(devlink, NULL);
333 nsim_fib_destroy(nsim_dev->fib_data);
334 mutex_destroy(&nsim_dev->port_list_lock); 321 mutex_destroy(&nsim_dev->port_list_lock);
335 devlink_free(devlink); 322 devlink_free(devlink);
336} 323}
diff --git a/drivers/net/netdevsim/fib.c b/drivers/net/netdevsim/fib.c
index 8c57ba747772..f61d094746c0 100644
--- a/drivers/net/netdevsim/fib.c
+++ b/drivers/net/netdevsim/fib.c
@@ -18,6 +18,7 @@
18#include <net/ip_fib.h> 18#include <net/ip_fib.h>
19#include <net/ip6_fib.h> 19#include <net/ip6_fib.h>
20#include <net/fib_rules.h> 20#include <net/fib_rules.h>
21#include <net/netns/generic.h>
21 22
22#include "netdevsim.h" 23#include "netdevsim.h"
23 24
@@ -32,14 +33,15 @@ struct nsim_per_fib_data {
32}; 33};
33 34
34struct nsim_fib_data { 35struct nsim_fib_data {
35 struct notifier_block fib_nb;
36 struct nsim_per_fib_data ipv4; 36 struct nsim_per_fib_data ipv4;
37 struct nsim_per_fib_data ipv6; 37 struct nsim_per_fib_data ipv6;
38}; 38};
39 39
40u64 nsim_fib_get_val(struct nsim_fib_data *fib_data, 40static unsigned int nsim_fib_net_id;
41 enum nsim_resource_id res_id, bool max) 41
42u64 nsim_fib_get_val(struct net *net, enum nsim_resource_id res_id, bool max)
42{ 43{
44 struct nsim_fib_data *fib_data = net_generic(net, nsim_fib_net_id);
43 struct nsim_fib_entry *entry; 45 struct nsim_fib_entry *entry;
44 46
45 switch (res_id) { 47 switch (res_id) {
@@ -62,10 +64,10 @@ u64 nsim_fib_get_val(struct nsim_fib_data *fib_data,
62 return max ? entry->max : entry->num; 64 return max ? entry->max : entry->num;
63} 65}
64 66
65int nsim_fib_set_max(struct nsim_fib_data *fib_data, 67int nsim_fib_set_max(struct net *net, enum nsim_resource_id res_id, u64 val,
66 enum nsim_resource_id res_id, u64 val,
67 struct netlink_ext_ack *extack) 68 struct netlink_ext_ack *extack)
68{ 69{
70 struct nsim_fib_data *fib_data = net_generic(net, nsim_fib_net_id);
69 struct nsim_fib_entry *entry; 71 struct nsim_fib_entry *entry;
70 int err = 0; 72 int err = 0;
71 73
@@ -118,9 +120,9 @@ static int nsim_fib_rule_account(struct nsim_fib_entry *entry, bool add,
118 return err; 120 return err;
119} 121}
120 122
121static int nsim_fib_rule_event(struct nsim_fib_data *data, 123static int nsim_fib_rule_event(struct fib_notifier_info *info, bool add)
122 struct fib_notifier_info *info, bool add)
123{ 124{
125 struct nsim_fib_data *data = net_generic(info->net, nsim_fib_net_id);
124 struct netlink_ext_ack *extack = info->extack; 126 struct netlink_ext_ack *extack = info->extack;
125 int err = 0; 127 int err = 0;
126 128
@@ -155,9 +157,9 @@ static int nsim_fib_account(struct nsim_fib_entry *entry, bool add,
155 return err; 157 return err;
156} 158}
157 159
158static int nsim_fib_event(struct nsim_fib_data *data, 160static int nsim_fib_event(struct fib_notifier_info *info, bool add)
159 struct fib_notifier_info *info, bool add)
160{ 161{
162 struct nsim_fib_data *data = net_generic(info->net, nsim_fib_net_id);
161 struct netlink_ext_ack *extack = info->extack; 163 struct netlink_ext_ack *extack = info->extack;
162 int err = 0; 164 int err = 0;
163 165
@@ -176,22 +178,18 @@ static int nsim_fib_event(struct nsim_fib_data *data,
176static int nsim_fib_event_nb(struct notifier_block *nb, unsigned long event, 178static int nsim_fib_event_nb(struct notifier_block *nb, unsigned long event,
177 void *ptr) 179 void *ptr)
178{ 180{
179 struct nsim_fib_data *data = container_of(nb, struct nsim_fib_data,
180 fib_nb);
181 struct fib_notifier_info *info = ptr; 181 struct fib_notifier_info *info = ptr;
182 int err = 0; 182 int err = 0;
183 183
184 switch (event) { 184 switch (event) {
185 case FIB_EVENT_RULE_ADD: /* fall through */ 185 case FIB_EVENT_RULE_ADD: /* fall through */
186 case FIB_EVENT_RULE_DEL: 186 case FIB_EVENT_RULE_DEL:
187 err = nsim_fib_rule_event(data, info, 187 err = nsim_fib_rule_event(info, event == FIB_EVENT_RULE_ADD);
188 event == FIB_EVENT_RULE_ADD);
189 break; 188 break;
190 189
191 case FIB_EVENT_ENTRY_ADD: /* fall through */ 190 case FIB_EVENT_ENTRY_ADD: /* fall through */
192 case FIB_EVENT_ENTRY_DEL: 191 case FIB_EVENT_ENTRY_DEL:
193 err = nsim_fib_event(data, info, 192 err = nsim_fib_event(info, event == FIB_EVENT_ENTRY_ADD);
194 event == FIB_EVENT_ENTRY_ADD);
195 break; 193 break;
196 } 194 }
197 195
@@ -201,23 +199,30 @@ static int nsim_fib_event_nb(struct notifier_block *nb, unsigned long event,
201/* inconsistent dump, trying again */ 199/* inconsistent dump, trying again */
202static void nsim_fib_dump_inconsistent(struct notifier_block *nb) 200static void nsim_fib_dump_inconsistent(struct notifier_block *nb)
203{ 201{
204 struct nsim_fib_data *data = container_of(nb, struct nsim_fib_data, 202 struct nsim_fib_data *data;
205 fib_nb); 203 struct net *net;
204
205 rcu_read_lock();
206 for_each_net_rcu(net) {
207 data = net_generic(net, nsim_fib_net_id);
208
209 data->ipv4.fib.num = 0ULL;
210 data->ipv4.rules.num = 0ULL;
206 211
207 data->ipv4.fib.num = 0ULL; 212 data->ipv6.fib.num = 0ULL;
208 data->ipv4.rules.num = 0ULL; 213 data->ipv6.rules.num = 0ULL;
209 data->ipv6.fib.num = 0ULL; 214 }
210 data->ipv6.rules.num = 0ULL; 215 rcu_read_unlock();
211} 216}
212 217
213struct nsim_fib_data *nsim_fib_create(void) 218static struct notifier_block nsim_fib_nb = {
214{ 219 .notifier_call = nsim_fib_event_nb,
215 struct nsim_fib_data *data; 220};
216 int err;
217 221
218 data = kzalloc(sizeof(*data), GFP_KERNEL); 222/* Initialize per network namespace state */
219 if (!data) 223static int __net_init nsim_fib_netns_init(struct net *net)
220 return ERR_PTR(-ENOMEM); 224{
225 struct nsim_fib_data *data = net_generic(net, nsim_fib_net_id);
221 226
222 data->ipv4.fib.max = (u64)-1; 227 data->ipv4.fib.max = (u64)-1;
223 data->ipv4.rules.max = (u64)-1; 228 data->ipv4.rules.max = (u64)-1;
@@ -225,22 +230,37 @@ struct nsim_fib_data *nsim_fib_create(void)
225 data->ipv6.fib.max = (u64)-1; 230 data->ipv6.fib.max = (u64)-1;
226 data->ipv6.rules.max = (u64)-1; 231 data->ipv6.rules.max = (u64)-1;
227 232
228 data->fib_nb.notifier_call = nsim_fib_event_nb; 233 return 0;
229 err = register_fib_notifier(&data->fib_nb, nsim_fib_dump_inconsistent); 234}
230 if (err) {
231 pr_err("Failed to register fib notifier\n");
232 goto err_out;
233 }
234 235
235 return data; 236static struct pernet_operations nsim_fib_net_ops = {
237 .init = nsim_fib_netns_init,
238 .id = &nsim_fib_net_id,
239 .size = sizeof(struct nsim_fib_data),
240};
236 241
237err_out: 242void nsim_fib_exit(void)
238 kfree(data); 243{
239 return ERR_PTR(err); 244 unregister_pernet_subsys(&nsim_fib_net_ops);
245 unregister_fib_notifier(&nsim_fib_nb);
240} 246}
241 247
242void nsim_fib_destroy(struct nsim_fib_data *data) 248int nsim_fib_init(void)
243{ 249{
244 unregister_fib_notifier(&data->fib_nb); 250 int err;
245 kfree(data); 251
252 err = register_pernet_subsys(&nsim_fib_net_ops);
253 if (err < 0) {
254 pr_err("Failed to register pernet subsystem\n");
255 goto err_out;
256 }
257
258 err = register_fib_notifier(&nsim_fib_nb, nsim_fib_dump_inconsistent);
259 if (err < 0) {
260 pr_err("Failed to register fib notifier\n");
261 goto err_out;
262 }
263
264err_out:
265 return err;
246} 266}
diff --git a/drivers/net/netdevsim/netdev.c b/drivers/net/netdevsim/netdev.c
index 0740940f41b1..55f57f76d01b 100644
--- a/drivers/net/netdevsim/netdev.c
+++ b/drivers/net/netdevsim/netdev.c
@@ -357,12 +357,18 @@ static int __init nsim_module_init(void)
357 if (err) 357 if (err)
358 goto err_dev_exit; 358 goto err_dev_exit;
359 359
360 err = rtnl_link_register(&nsim_link_ops); 360 err = nsim_fib_init();
361 if (err) 361 if (err)
362 goto err_bus_exit; 362 goto err_bus_exit;
363 363
364 err = rtnl_link_register(&nsim_link_ops);
365 if (err)
366 goto err_fib_exit;
367
364 return 0; 368 return 0;
365 369
370err_fib_exit:
371 nsim_fib_exit();
366err_bus_exit: 372err_bus_exit:
367 nsim_bus_exit(); 373 nsim_bus_exit();
368err_dev_exit: 374err_dev_exit:
@@ -373,6 +379,7 @@ err_dev_exit:
373static void __exit nsim_module_exit(void) 379static void __exit nsim_module_exit(void)
374{ 380{
375 rtnl_link_unregister(&nsim_link_ops); 381 rtnl_link_unregister(&nsim_link_ops);
382 nsim_fib_exit();
376 nsim_bus_exit(); 383 nsim_bus_exit();
377 nsim_dev_exit(); 384 nsim_dev_exit();
378} 385}
diff --git a/drivers/net/netdevsim/netdevsim.h b/drivers/net/netdevsim/netdevsim.h
index 79c05af2a7c0..9404637d34b7 100644
--- a/drivers/net/netdevsim/netdevsim.h
+++ b/drivers/net/netdevsim/netdevsim.h
@@ -169,12 +169,10 @@ int nsim_dev_port_add(struct nsim_bus_dev *nsim_bus_dev,
169int nsim_dev_port_del(struct nsim_bus_dev *nsim_bus_dev, 169int nsim_dev_port_del(struct nsim_bus_dev *nsim_bus_dev,
170 unsigned int port_index); 170 unsigned int port_index);
171 171
172struct nsim_fib_data *nsim_fib_create(void); 172int nsim_fib_init(void);
173void nsim_fib_destroy(struct nsim_fib_data *fib_data); 173void nsim_fib_exit(void);
174u64 nsim_fib_get_val(struct nsim_fib_data *fib_data, 174u64 nsim_fib_get_val(struct net *net, enum nsim_resource_id res_id, bool max);
175 enum nsim_resource_id res_id, bool max); 175int nsim_fib_set_max(struct net *net, enum nsim_resource_id res_id, u64 val,
176int nsim_fib_set_max(struct nsim_fib_data *fib_data,
177 enum nsim_resource_id res_id, u64 val,
178 struct netlink_ext_ack *extack); 176 struct netlink_ext_ack *extack);
179 177
180#if IS_ENABLED(CONFIG_XFRM_OFFLOAD) 178#if IS_ENABLED(CONFIG_XFRM_OFFLOAD)
diff --git a/drivers/net/phy/at803x.c b/drivers/net/phy/at803x.c
index 222ccd9ecfce..6ad8b1c63c34 100644
--- a/drivers/net/phy/at803x.c
+++ b/drivers/net/phy/at803x.c
@@ -257,36 +257,20 @@ static int at803x_config_init(struct phy_device *phydev)
257 * after HW reset: RX delay enabled and TX delay disabled 257 * after HW reset: RX delay enabled and TX delay disabled
258 * after SW reset: RX delay enabled, while TX delay retains the 258 * after SW reset: RX delay enabled, while TX delay retains the
259 * value before reset. 259 * value before reset.
260 *
261 * So let's first disable the RX and TX delays in PHY and enable
262 * them based on the mode selected (this also takes care of RGMII
263 * mode where we expect delays to be disabled)
264 */ 260 */
265
266 ret = at803x_disable_rx_delay(phydev);
267 if (ret < 0)
268 return ret;
269 ret = at803x_disable_tx_delay(phydev);
270 if (ret < 0)
271 return ret;
272
273 if (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID || 261 if (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID ||
274 phydev->interface == PHY_INTERFACE_MODE_RGMII_RXID) { 262 phydev->interface == PHY_INTERFACE_MODE_RGMII_RXID)
275 /* If RGMII_ID or RGMII_RXID are specified enable RX delay,
276 * otherwise keep it disabled
277 */
278 ret = at803x_enable_rx_delay(phydev); 263 ret = at803x_enable_rx_delay(phydev);
279 if (ret < 0) 264 else
280 return ret; 265 ret = at803x_disable_rx_delay(phydev);
281 } 266 if (ret < 0)
267 return ret;
282 268
283 if (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID || 269 if (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID ||
284 phydev->interface == PHY_INTERFACE_MODE_RGMII_TXID) { 270 phydev->interface == PHY_INTERFACE_MODE_RGMII_TXID)
285 /* If RGMII_ID or RGMII_TXID are specified enable TX delay,
286 * otherwise keep it disabled
287 */
288 ret = at803x_enable_tx_delay(phydev); 271 ret = at803x_enable_tx_delay(phydev);
289 } 272 else
273 ret = at803x_disable_tx_delay(phydev);
290 274
291 return ret; 275 return ret;
292} 276}
diff --git a/drivers/net/phy/fixed_phy.c b/drivers/net/phy/fixed_phy.c
index 3ffe46df249e..7c5265fd2b94 100644
--- a/drivers/net/phy/fixed_phy.c
+++ b/drivers/net/phy/fixed_phy.c
@@ -216,8 +216,10 @@ static struct gpio_desc *fixed_phy_get_gpiod(struct device_node *np)
216 if (IS_ERR(gpiod)) { 216 if (IS_ERR(gpiod)) {
217 if (PTR_ERR(gpiod) == -EPROBE_DEFER) 217 if (PTR_ERR(gpiod) == -EPROBE_DEFER)
218 return gpiod; 218 return gpiod;
219 pr_err("error getting GPIO for fixed link %pOF, proceed without\n", 219
220 fixed_link_node); 220 if (PTR_ERR(gpiod) != -ENOENT)
221 pr_err("error getting GPIO for fixed link %pOF, proceed without\n",
222 fixed_link_node);
221 gpiod = NULL; 223 gpiod = NULL;
222 } 224 }
223 225
diff --git a/drivers/net/phy/mscc.c b/drivers/net/phy/mscc.c
index 28676af97b42..645d354ffb48 100644
--- a/drivers/net/phy/mscc.c
+++ b/drivers/net/phy/mscc.c
@@ -2226,8 +2226,8 @@ static int vsc8514_probe(struct phy_device *phydev)
2226 vsc8531->supp_led_modes = VSC85XX_SUPP_LED_MODES; 2226 vsc8531->supp_led_modes = VSC85XX_SUPP_LED_MODES;
2227 vsc8531->hw_stats = vsc85xx_hw_stats; 2227 vsc8531->hw_stats = vsc85xx_hw_stats;
2228 vsc8531->nstats = ARRAY_SIZE(vsc85xx_hw_stats); 2228 vsc8531->nstats = ARRAY_SIZE(vsc85xx_hw_stats);
2229 vsc8531->stats = devm_kmalloc_array(&phydev->mdio.dev, vsc8531->nstats, 2229 vsc8531->stats = devm_kcalloc(&phydev->mdio.dev, vsc8531->nstats,
2230 sizeof(u64), GFP_KERNEL); 2230 sizeof(u64), GFP_KERNEL);
2231 if (!vsc8531->stats) 2231 if (!vsc8531->stats)
2232 return -ENOMEM; 2232 return -ENOMEM;
2233 2233
@@ -2251,8 +2251,8 @@ static int vsc8574_probe(struct phy_device *phydev)
2251 vsc8531->supp_led_modes = VSC8584_SUPP_LED_MODES; 2251 vsc8531->supp_led_modes = VSC8584_SUPP_LED_MODES;
2252 vsc8531->hw_stats = vsc8584_hw_stats; 2252 vsc8531->hw_stats = vsc8584_hw_stats;
2253 vsc8531->nstats = ARRAY_SIZE(vsc8584_hw_stats); 2253 vsc8531->nstats = ARRAY_SIZE(vsc8584_hw_stats);
2254 vsc8531->stats = devm_kmalloc_array(&phydev->mdio.dev, vsc8531->nstats, 2254 vsc8531->stats = devm_kcalloc(&phydev->mdio.dev, vsc8531->nstats,
2255 sizeof(u64), GFP_KERNEL); 2255 sizeof(u64), GFP_KERNEL);
2256 if (!vsc8531->stats) 2256 if (!vsc8531->stats)
2257 return -ENOMEM; 2257 return -ENOMEM;
2258 2258
@@ -2281,8 +2281,8 @@ static int vsc8584_probe(struct phy_device *phydev)
2281 vsc8531->supp_led_modes = VSC8584_SUPP_LED_MODES; 2281 vsc8531->supp_led_modes = VSC8584_SUPP_LED_MODES;
2282 vsc8531->hw_stats = vsc8584_hw_stats; 2282 vsc8531->hw_stats = vsc8584_hw_stats;
2283 vsc8531->nstats = ARRAY_SIZE(vsc8584_hw_stats); 2283 vsc8531->nstats = ARRAY_SIZE(vsc8584_hw_stats);
2284 vsc8531->stats = devm_kmalloc_array(&phydev->mdio.dev, vsc8531->nstats, 2284 vsc8531->stats = devm_kcalloc(&phydev->mdio.dev, vsc8531->nstats,
2285 sizeof(u64), GFP_KERNEL); 2285 sizeof(u64), GFP_KERNEL);
2286 if (!vsc8531->stats) 2286 if (!vsc8531->stats)
2287 return -ENOMEM; 2287 return -ENOMEM;
2288 2288
@@ -2311,8 +2311,8 @@ static int vsc85xx_probe(struct phy_device *phydev)
2311 vsc8531->supp_led_modes = VSC85XX_SUPP_LED_MODES; 2311 vsc8531->supp_led_modes = VSC85XX_SUPP_LED_MODES;
2312 vsc8531->hw_stats = vsc85xx_hw_stats; 2312 vsc8531->hw_stats = vsc85xx_hw_stats;
2313 vsc8531->nstats = ARRAY_SIZE(vsc85xx_hw_stats); 2313 vsc8531->nstats = ARRAY_SIZE(vsc85xx_hw_stats);
2314 vsc8531->stats = devm_kmalloc_array(&phydev->mdio.dev, vsc8531->nstats, 2314 vsc8531->stats = devm_kcalloc(&phydev->mdio.dev, vsc8531->nstats,
2315 sizeof(u64), GFP_KERNEL); 2315 sizeof(u64), GFP_KERNEL);
2316 if (!vsc8531->stats) 2316 if (!vsc8531->stats)
2317 return -ENOMEM; 2317 return -ENOMEM;
2318 2318
diff --git a/drivers/net/phy/phy-c45.c b/drivers/net/phy/phy-c45.c
index b9d4145781ca..7935593debb1 100644
--- a/drivers/net/phy/phy-c45.c
+++ b/drivers/net/phy/phy-c45.c
@@ -219,6 +219,20 @@ int genphy_c45_read_link(struct phy_device *phydev)
219 int val, devad; 219 int val, devad;
220 bool link = true; 220 bool link = true;
221 221
222 if (phydev->c45_ids.devices_in_package & MDIO_DEVS_AN) {
223 val = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_CTRL1);
224 if (val < 0)
225 return val;
226
227 /* Autoneg is being started, therefore disregard current
228 * link status and report link as down.
229 */
230 if (val & MDIO_AN_CTRL1_RESTART) {
231 phydev->link = 0;
232 return 0;
233 }
234 }
235
222 while (mmd_mask && link) { 236 while (mmd_mask && link) {
223 devad = __ffs(mmd_mask); 237 devad = __ffs(mmd_mask);
224 mmd_mask &= ~BIT(devad); 238 mmd_mask &= ~BIT(devad);
@@ -509,6 +523,32 @@ int genphy_c45_read_status(struct phy_device *phydev)
509} 523}
510EXPORT_SYMBOL_GPL(genphy_c45_read_status); 524EXPORT_SYMBOL_GPL(genphy_c45_read_status);
511 525
526/**
527 * genphy_c45_config_aneg - restart auto-negotiation or forced setup
528 * @phydev: target phy_device struct
529 *
530 * Description: If auto-negotiation is enabled, we configure the
531 * advertising, and then restart auto-negotiation. If it is not
532 * enabled, then we force a configuration.
533 */
534int genphy_c45_config_aneg(struct phy_device *phydev)
535{
536 bool changed = false;
537 int ret;
538
539 if (phydev->autoneg == AUTONEG_DISABLE)
540 return genphy_c45_pma_setup_forced(phydev);
541
542 ret = genphy_c45_an_config_aneg(phydev);
543 if (ret < 0)
544 return ret;
545 if (ret > 0)
546 changed = true;
547
548 return genphy_c45_check_and_restart_aneg(phydev, changed);
549}
550EXPORT_SYMBOL_GPL(genphy_c45_config_aneg);
551
512/* The gen10g_* functions are the old Clause 45 stub */ 552/* The gen10g_* functions are the old Clause 45 stub */
513 553
514int gen10g_config_aneg(struct phy_device *phydev) 554int gen10g_config_aneg(struct phy_device *phydev)
diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
index ef7aa738e0dc..6b0f89369b46 100644
--- a/drivers/net/phy/phy.c
+++ b/drivers/net/phy/phy.c
@@ -507,7 +507,7 @@ static int phy_config_aneg(struct phy_device *phydev)
507 * allowed to call genphy_config_aneg() 507 * allowed to call genphy_config_aneg()
508 */ 508 */
509 if (phydev->is_c45 && !(phydev->c45_ids.devices_in_package & BIT(0))) 509 if (phydev->is_c45 && !(phydev->c45_ids.devices_in_package & BIT(0)))
510 return -EOPNOTSUPP; 510 return genphy_c45_config_aneg(phydev);
511 511
512 return genphy_config_aneg(phydev); 512 return genphy_config_aneg(phydev);
513} 513}
diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
index 6b5cb87f3866..27ebc2c6c2d0 100644
--- a/drivers/net/phy/phy_device.c
+++ b/drivers/net/phy/phy_device.c
@@ -1752,7 +1752,17 @@ EXPORT_SYMBOL(genphy_aneg_done);
1752 */ 1752 */
1753int genphy_update_link(struct phy_device *phydev) 1753int genphy_update_link(struct phy_device *phydev)
1754{ 1754{
1755 int status; 1755 int status = 0, bmcr;
1756
1757 bmcr = phy_read(phydev, MII_BMCR);
1758 if (bmcr < 0)
1759 return bmcr;
1760
1761 /* Autoneg is being started, therefore disregard BMSR value and
1762 * report link as down.
1763 */
1764 if (bmcr & BMCR_ANRESTART)
1765 goto done;
1756 1766
1757 /* The link state is latched low so that momentary link 1767 /* The link state is latched low so that momentary link
1758 * drops can be detected. Do not double-read the status 1768 * drops can be detected. Do not double-read the status
@@ -1774,6 +1784,12 @@ done:
1774 phydev->link = status & BMSR_LSTATUS ? 1 : 0; 1784 phydev->link = status & BMSR_LSTATUS ? 1 : 0;
1775 phydev->autoneg_complete = status & BMSR_ANEGCOMPLETE ? 1 : 0; 1785 phydev->autoneg_complete = status & BMSR_ANEGCOMPLETE ? 1 : 0;
1776 1786
1787 /* Consider the case that autoneg was started and "aneg complete"
1788 * bit has been reset, but "link up" bit not yet.
1789 */
1790 if (phydev->autoneg == AUTONEG_ENABLE && !phydev->autoneg_complete)
1791 phydev->link = 0;
1792
1777 return 0; 1793 return 0;
1778} 1794}
1779EXPORT_SYMBOL(genphy_update_link); 1795EXPORT_SYMBOL(genphy_update_link);
diff --git a/drivers/net/phy/phy_led_triggers.c b/drivers/net/phy/phy_led_triggers.c
index b86a4b2116f8..59a94e07e7c5 100644
--- a/drivers/net/phy/phy_led_triggers.c
+++ b/drivers/net/phy/phy_led_triggers.c
@@ -48,8 +48,9 @@ void phy_led_trigger_change_speed(struct phy_device *phy)
48 if (!phy->last_triggered) 48 if (!phy->last_triggered)
49 led_trigger_event(&phy->led_link_trigger->trigger, 49 led_trigger_event(&phy->led_link_trigger->trigger,
50 LED_FULL); 50 LED_FULL);
51 else
52 led_trigger_event(&phy->last_triggered->trigger, LED_OFF);
51 53
52 led_trigger_event(&phy->last_triggered->trigger, LED_OFF);
53 led_trigger_event(&plt->trigger, LED_FULL); 54 led_trigger_event(&plt->trigger, LED_FULL);
54 phy->last_triggered = plt; 55 phy->last_triggered = plt;
55 } 56 }
diff --git a/drivers/net/phy/phylink.c b/drivers/net/phy/phylink.c
index 5d0af041b8f9..a45c5de96ab1 100644
--- a/drivers/net/phy/phylink.c
+++ b/drivers/net/phy/phylink.c
@@ -216,6 +216,8 @@ static int phylink_parse_fixedlink(struct phylink *pl,
216 pl->supported, true); 216 pl->supported, true);
217 linkmode_zero(pl->supported); 217 linkmode_zero(pl->supported);
218 phylink_set(pl->supported, MII); 218 phylink_set(pl->supported, MII);
219 phylink_set(pl->supported, Pause);
220 phylink_set(pl->supported, Asym_Pause);
219 if (s) { 221 if (s) {
220 __set_bit(s->bit, pl->supported); 222 __set_bit(s->bit, pl->supported);
221 } else { 223 } else {
@@ -990,10 +992,10 @@ void phylink_start(struct phylink *pl)
990 } 992 }
991 if (pl->link_an_mode == MLO_AN_FIXED && pl->get_fixed_state) 993 if (pl->link_an_mode == MLO_AN_FIXED && pl->get_fixed_state)
992 mod_timer(&pl->link_poll, jiffies + HZ); 994 mod_timer(&pl->link_poll, jiffies + HZ);
993 if (pl->sfp_bus)
994 sfp_upstream_start(pl->sfp_bus);
995 if (pl->phydev) 995 if (pl->phydev)
996 phy_start(pl->phydev); 996 phy_start(pl->phydev);
997 if (pl->sfp_bus)
998 sfp_upstream_start(pl->sfp_bus);
997} 999}
998EXPORT_SYMBOL_GPL(phylink_start); 1000EXPORT_SYMBOL_GPL(phylink_start);
999 1001
@@ -1010,10 +1012,10 @@ void phylink_stop(struct phylink *pl)
1010{ 1012{
1011 ASSERT_RTNL(); 1013 ASSERT_RTNL();
1012 1014
1013 if (pl->phydev)
1014 phy_stop(pl->phydev);
1015 if (pl->sfp_bus) 1015 if (pl->sfp_bus)
1016 sfp_upstream_stop(pl->sfp_bus); 1016 sfp_upstream_stop(pl->sfp_bus);
1017 if (pl->phydev)
1018 phy_stop(pl->phydev);
1017 del_timer_sync(&pl->link_poll); 1019 del_timer_sync(&pl->link_poll);
1018 if (pl->link_irq) { 1020 if (pl->link_irq) {
1019 free_irq(pl->link_irq, pl); 1021 free_irq(pl->link_irq, pl);
diff --git a/drivers/net/phy/sfp.c b/drivers/net/phy/sfp.c
index 2d816aadea79..e36c04c26866 100644
--- a/drivers/net/phy/sfp.c
+++ b/drivers/net/phy/sfp.c
@@ -517,7 +517,7 @@ static int sfp_hwmon_read_sensor(struct sfp *sfp, int reg, long *value)
517 517
518static void sfp_hwmon_to_rx_power(long *value) 518static void sfp_hwmon_to_rx_power(long *value)
519{ 519{
520 *value = DIV_ROUND_CLOSEST(*value, 100); 520 *value = DIV_ROUND_CLOSEST(*value, 10);
521} 521}
522 522
523static void sfp_hwmon_calibrate(struct sfp *sfp, unsigned int slope, int offset, 523static void sfp_hwmon_calibrate(struct sfp *sfp, unsigned int slope, int offset,
diff --git a/drivers/net/ppp/pppoe.c b/drivers/net/ppp/pppoe.c
index 1d902ecb4aa8..a44dd3c8af63 100644
--- a/drivers/net/ppp/pppoe.c
+++ b/drivers/net/ppp/pppoe.c
@@ -1115,6 +1115,9 @@ static const struct proto_ops pppoe_ops = {
1115 .recvmsg = pppoe_recvmsg, 1115 .recvmsg = pppoe_recvmsg,
1116 .mmap = sock_no_mmap, 1116 .mmap = sock_no_mmap,
1117 .ioctl = pppox_ioctl, 1117 .ioctl = pppox_ioctl,
1118#ifdef CONFIG_COMPAT
1119 .compat_ioctl = pppox_compat_ioctl,
1120#endif
1118}; 1121};
1119 1122
1120static const struct pppox_proto pppoe_proto = { 1123static const struct pppox_proto pppoe_proto = {
diff --git a/drivers/net/ppp/pppox.c b/drivers/net/ppp/pppox.c
index 5ef422a43d70..08364f10a43f 100644
--- a/drivers/net/ppp/pppox.c
+++ b/drivers/net/ppp/pppox.c
@@ -17,6 +17,7 @@
17#include <linux/string.h> 17#include <linux/string.h>
18#include <linux/module.h> 18#include <linux/module.h>
19#include <linux/kernel.h> 19#include <linux/kernel.h>
20#include <linux/compat.h>
20#include <linux/errno.h> 21#include <linux/errno.h>
21#include <linux/netdevice.h> 22#include <linux/netdevice.h>
22#include <linux/net.h> 23#include <linux/net.h>
@@ -98,6 +99,18 @@ int pppox_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
98 99
99EXPORT_SYMBOL(pppox_ioctl); 100EXPORT_SYMBOL(pppox_ioctl);
100 101
102#ifdef CONFIG_COMPAT
103int pppox_compat_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
104{
105 if (cmd == PPPOEIOCSFWD32)
106 cmd = PPPOEIOCSFWD;
107
108 return pppox_ioctl(sock, cmd, (unsigned long)compat_ptr(arg));
109}
110
111EXPORT_SYMBOL(pppox_compat_ioctl);
112#endif
113
101static int pppox_create(struct net *net, struct socket *sock, int protocol, 114static int pppox_create(struct net *net, struct socket *sock, int protocol,
102 int kern) 115 int kern)
103{ 116{
diff --git a/drivers/net/ppp/pptp.c b/drivers/net/ppp/pptp.c
index a8e52c8e4128..734de7de03f7 100644
--- a/drivers/net/ppp/pptp.c
+++ b/drivers/net/ppp/pptp.c
@@ -623,6 +623,9 @@ static const struct proto_ops pptp_ops = {
623 .recvmsg = sock_no_recvmsg, 623 .recvmsg = sock_no_recvmsg,
624 .mmap = sock_no_mmap, 624 .mmap = sock_no_mmap,
625 .ioctl = pppox_ioctl, 625 .ioctl = pppox_ioctl,
626#ifdef CONFIG_COMPAT
627 .compat_ioctl = pppox_compat_ioctl,
628#endif
626}; 629};
627 630
628static const struct pppox_proto pppox_pptp_proto = { 631static const struct pppox_proto pppox_pptp_proto = {
diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
index abfa0da9bbd2..e8089def5a46 100644
--- a/drivers/net/team/team.c
+++ b/drivers/net/team/team.c
@@ -1004,6 +1004,8 @@ static void __team_compute_features(struct team *team)
1004 1004
1005 team->dev->vlan_features = vlan_features; 1005 team->dev->vlan_features = vlan_features;
1006 team->dev->hw_enc_features = enc_features | NETIF_F_GSO_ENCAP_ALL | 1006 team->dev->hw_enc_features = enc_features | NETIF_F_GSO_ENCAP_ALL |
1007 NETIF_F_HW_VLAN_CTAG_TX |
1008 NETIF_F_HW_VLAN_STAG_TX |
1007 NETIF_F_GSO_UDP_L4; 1009 NETIF_F_GSO_UDP_L4;
1008 team->dev->hard_header_len = max_hard_header_len; 1010 team->dev->hard_header_len = max_hard_header_len;
1009 1011
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index 3d443597bd04..db16d7a13e00 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -1599,7 +1599,8 @@ static bool tun_can_build_skb(struct tun_struct *tun, struct tun_file *tfile,
1599 return true; 1599 return true;
1600} 1600}
1601 1601
1602static struct sk_buff *__tun_build_skb(struct page_frag *alloc_frag, char *buf, 1602static struct sk_buff *__tun_build_skb(struct tun_file *tfile,
1603 struct page_frag *alloc_frag, char *buf,
1603 int buflen, int len, int pad) 1604 int buflen, int len, int pad)
1604{ 1605{
1605 struct sk_buff *skb = build_skb(buf, buflen); 1606 struct sk_buff *skb = build_skb(buf, buflen);
@@ -1609,6 +1610,7 @@ static struct sk_buff *__tun_build_skb(struct page_frag *alloc_frag, char *buf,
1609 1610
1610 skb_reserve(skb, pad); 1611 skb_reserve(skb, pad);
1611 skb_put(skb, len); 1612 skb_put(skb, len);
1613 skb_set_owner_w(skb, tfile->socket.sk);
1612 1614
1613 get_page(alloc_frag->page); 1615 get_page(alloc_frag->page);
1614 alloc_frag->offset += buflen; 1616 alloc_frag->offset += buflen;
@@ -1686,7 +1688,8 @@ static struct sk_buff *tun_build_skb(struct tun_struct *tun,
1686 */ 1688 */
1687 if (hdr->gso_type || !xdp_prog) { 1689 if (hdr->gso_type || !xdp_prog) {
1688 *skb_xdp = 1; 1690 *skb_xdp = 1;
1689 return __tun_build_skb(alloc_frag, buf, buflen, len, pad); 1691 return __tun_build_skb(tfile, alloc_frag, buf, buflen, len,
1692 pad);
1690 } 1693 }
1691 1694
1692 *skb_xdp = 0; 1695 *skb_xdp = 0;
@@ -1723,7 +1726,7 @@ static struct sk_buff *tun_build_skb(struct tun_struct *tun,
1723 rcu_read_unlock(); 1726 rcu_read_unlock();
1724 local_bh_enable(); 1727 local_bh_enable();
1725 1728
1726 return __tun_build_skb(alloc_frag, buf, buflen, len, pad); 1729 return __tun_build_skb(tfile, alloc_frag, buf, buflen, len, pad);
1727 1730
1728err_xdp: 1731err_xdp:
1729 put_page(alloc_frag->page); 1732 put_page(alloc_frag->page);
diff --git a/drivers/net/usb/cx82310_eth.c b/drivers/net/usb/cx82310_eth.c
index 5519248a791e..32b08b18e120 100644
--- a/drivers/net/usb/cx82310_eth.c
+++ b/drivers/net/usb/cx82310_eth.c
@@ -163,7 +163,8 @@ static int cx82310_bind(struct usbnet *dev, struct usb_interface *intf)
163 } 163 }
164 if (!timeout) { 164 if (!timeout) {
165 dev_err(&udev->dev, "firmware not ready in time\n"); 165 dev_err(&udev->dev, "firmware not ready in time\n");
166 return -ETIMEDOUT; 166 ret = -ETIMEDOUT;
167 goto err;
167 } 168 }
168 169
169 /* enable ethernet mode (?) */ 170 /* enable ethernet mode (?) */
diff --git a/drivers/net/usb/kalmia.c b/drivers/net/usb/kalmia.c
index d62b6706a537..fc5895f85cee 100644
--- a/drivers/net/usb/kalmia.c
+++ b/drivers/net/usb/kalmia.c
@@ -113,16 +113,16 @@ kalmia_init_and_get_ethernet_addr(struct usbnet *dev, u8 *ethernet_addr)
113 status = kalmia_send_init_packet(dev, usb_buf, ARRAY_SIZE(init_msg_1), 113 status = kalmia_send_init_packet(dev, usb_buf, ARRAY_SIZE(init_msg_1),
114 usb_buf, 24); 114 usb_buf, 24);
115 if (status != 0) 115 if (status != 0)
116 return status; 116 goto out;
117 117
118 memcpy(usb_buf, init_msg_2, 12); 118 memcpy(usb_buf, init_msg_2, 12);
119 status = kalmia_send_init_packet(dev, usb_buf, ARRAY_SIZE(init_msg_2), 119 status = kalmia_send_init_packet(dev, usb_buf, ARRAY_SIZE(init_msg_2),
120 usb_buf, 28); 120 usb_buf, 28);
121 if (status != 0) 121 if (status != 0)
122 return status; 122 goto out;
123 123
124 memcpy(ethernet_addr, usb_buf + 10, ETH_ALEN); 124 memcpy(ethernet_addr, usb_buf + 10, ETH_ALEN);
125 125out:
126 kfree(usb_buf); 126 kfree(usb_buf);
127 return status; 127 return status;
128} 128}
diff --git a/drivers/net/usb/lan78xx.c b/drivers/net/usb/lan78xx.c
index 3d92ea6fcc02..f033fee225a1 100644
--- a/drivers/net/usb/lan78xx.c
+++ b/drivers/net/usb/lan78xx.c
@@ -3792,7 +3792,7 @@ static int lan78xx_probe(struct usb_interface *intf,
3792 ret = register_netdev(netdev); 3792 ret = register_netdev(netdev);
3793 if (ret != 0) { 3793 if (ret != 0) {
3794 netif_err(dev, probe, netdev, "couldn't register the device\n"); 3794 netif_err(dev, probe, netdev, "couldn't register the device\n");
3795 goto out3; 3795 goto out4;
3796 } 3796 }
3797 3797
3798 usb_set_intfdata(intf, dev); 3798 usb_set_intfdata(intf, dev);
@@ -3807,12 +3807,14 @@ static int lan78xx_probe(struct usb_interface *intf,
3807 3807
3808 ret = lan78xx_phy_init(dev); 3808 ret = lan78xx_phy_init(dev);
3809 if (ret < 0) 3809 if (ret < 0)
3810 goto out4; 3810 goto out5;
3811 3811
3812 return 0; 3812 return 0;
3813 3813
3814out4: 3814out5:
3815 unregister_netdev(netdev); 3815 unregister_netdev(netdev);
3816out4:
3817 usb_free_urb(dev->urb_intr);
3816out3: 3818out3:
3817 lan78xx_unbind(dev, intf); 3819 lan78xx_unbind(dev, intf);
3818out2: 3820out2:
diff --git a/drivers/net/usb/pegasus.c b/drivers/net/usb/pegasus.c
index 6d25dea5ad4b..f7d117d80cfb 100644
--- a/drivers/net/usb/pegasus.c
+++ b/drivers/net/usb/pegasus.c
@@ -282,7 +282,7 @@ static void mdio_write(struct net_device *dev, int phy_id, int loc, int val)
282static int read_eprom_word(pegasus_t *pegasus, __u8 index, __u16 *retdata) 282static int read_eprom_word(pegasus_t *pegasus, __u8 index, __u16 *retdata)
283{ 283{
284 int i; 284 int i;
285 __u8 tmp; 285 __u8 tmp = 0;
286 __le16 retdatai; 286 __le16 retdatai;
287 int ret; 287 int ret;
288 288
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
index 69e0a2acfcb0..b6dc5d714b5e 100644
--- a/drivers/net/usb/qmi_wwan.c
+++ b/drivers/net/usb/qmi_wwan.c
@@ -1295,6 +1295,7 @@ static const struct usb_device_id products[] = {
1295 {QMI_FIXED_INTF(0x2001, 0x7e3d, 4)}, /* D-Link DWM-222 A2 */ 1295 {QMI_FIXED_INTF(0x2001, 0x7e3d, 4)}, /* D-Link DWM-222 A2 */
1296 {QMI_FIXED_INTF(0x2020, 0x2031, 4)}, /* Olicard 600 */ 1296 {QMI_FIXED_INTF(0x2020, 0x2031, 4)}, /* Olicard 600 */
1297 {QMI_FIXED_INTF(0x2020, 0x2033, 4)}, /* BroadMobi BM806U */ 1297 {QMI_FIXED_INTF(0x2020, 0x2033, 4)}, /* BroadMobi BM806U */
1298 {QMI_FIXED_INTF(0x2020, 0x2060, 4)}, /* BroadMobi BM818 */
1298 {QMI_FIXED_INTF(0x0f3d, 0x68a2, 8)}, /* Sierra Wireless MC7700 */ 1299 {QMI_FIXED_INTF(0x0f3d, 0x68a2, 8)}, /* Sierra Wireless MC7700 */
1299 {QMI_FIXED_INTF(0x114f, 0x68a2, 8)}, /* Sierra Wireless MC7750 */ 1300 {QMI_FIXED_INTF(0x114f, 0x68a2, 8)}, /* Sierra Wireless MC7750 */
1300 {QMI_FIXED_INTF(0x1199, 0x68a2, 8)}, /* Sierra Wireless MC7710 in QMI mode */ 1301 {QMI_FIXED_INTF(0x1199, 0x68a2, 8)}, /* Sierra Wireless MC7710 in QMI mode */
diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
index 39e0768d734d..04137ac373b0 100644
--- a/drivers/net/usb/r8152.c
+++ b/drivers/net/usb/r8152.c
@@ -50,7 +50,7 @@
50#define PLA_TEREDO_WAKE_BASE 0xc0c4 50#define PLA_TEREDO_WAKE_BASE 0xc0c4
51#define PLA_MAR 0xcd00 51#define PLA_MAR 0xcd00
52#define PLA_BACKUP 0xd000 52#define PLA_BACKUP 0xd000
53#define PAL_BDC_CR 0xd1a0 53#define PLA_BDC_CR 0xd1a0
54#define PLA_TEREDO_TIMER 0xd2cc 54#define PLA_TEREDO_TIMER 0xd2cc
55#define PLA_REALWOW_TIMER 0xd2e8 55#define PLA_REALWOW_TIMER 0xd2e8
56#define PLA_SUSPEND_FLAG 0xd38a 56#define PLA_SUSPEND_FLAG 0xd38a
@@ -274,7 +274,7 @@
274#define TEREDO_RS_EVENT_MASK 0x00fe 274#define TEREDO_RS_EVENT_MASK 0x00fe
275#define OOB_TEREDO_EN 0x0001 275#define OOB_TEREDO_EN 0x0001
276 276
277/* PAL_BDC_CR */ 277/* PLA_BDC_CR */
278#define ALDPS_PROXY_MODE 0x0001 278#define ALDPS_PROXY_MODE 0x0001
279 279
280/* PLA_EFUSE_CMD */ 280/* PLA_EFUSE_CMD */
@@ -799,8 +799,11 @@ int get_registers(struct r8152 *tp, u16 value, u16 index, u16 size, void *data)
799 ret = usb_control_msg(tp->udev, usb_rcvctrlpipe(tp->udev, 0), 799 ret = usb_control_msg(tp->udev, usb_rcvctrlpipe(tp->udev, 0),
800 RTL8152_REQ_GET_REGS, RTL8152_REQT_READ, 800 RTL8152_REQ_GET_REGS, RTL8152_REQT_READ,
801 value, index, tmp, size, 500); 801 value, index, tmp, size, 500);
802 if (ret < 0)
803 memset(data, 0xff, size);
804 else
805 memcpy(data, tmp, size);
802 806
803 memcpy(data, tmp, size);
804 kfree(tmp); 807 kfree(tmp);
805 808
806 return ret; 809 return ret;
@@ -3191,9 +3194,9 @@ static void r8152b_enter_oob(struct r8152 *tp)
3191 3194
3192 rtl_rx_vlan_en(tp, true); 3195 rtl_rx_vlan_en(tp, true);
3193 3196
3194 ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PAL_BDC_CR); 3197 ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_BDC_CR);
3195 ocp_data |= ALDPS_PROXY_MODE; 3198 ocp_data |= ALDPS_PROXY_MODE;
3196 ocp_write_word(tp, MCU_TYPE_PLA, PAL_BDC_CR, ocp_data); 3199 ocp_write_word(tp, MCU_TYPE_PLA, PLA_BDC_CR, ocp_data);
3197 3200
3198 ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL); 3201 ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL);
3199 ocp_data |= NOW_IS_OOB | DIS_MCU_CLROOB; 3202 ocp_data |= NOW_IS_OOB | DIS_MCU_CLROOB;
@@ -3577,9 +3580,9 @@ static void r8153_enter_oob(struct r8152 *tp)
3577 3580
3578 rtl_rx_vlan_en(tp, true); 3581 rtl_rx_vlan_en(tp, true);
3579 3582
3580 ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PAL_BDC_CR); 3583 ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_BDC_CR);
3581 ocp_data |= ALDPS_PROXY_MODE; 3584 ocp_data |= ALDPS_PROXY_MODE;
3582 ocp_write_word(tp, MCU_TYPE_PLA, PAL_BDC_CR, ocp_data); 3585 ocp_write_word(tp, MCU_TYPE_PLA, PLA_BDC_CR, ocp_data);
3583 3586
3584 ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL); 3587 ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL);
3585 ocp_data |= NOW_IS_OOB | DIS_MCU_CLROOB; 3588 ocp_data |= NOW_IS_OOB | DIS_MCU_CLROOB;
@@ -4018,8 +4021,7 @@ static int rtl8152_close(struct net_device *netdev)
4018#ifdef CONFIG_PM_SLEEP 4021#ifdef CONFIG_PM_SLEEP
4019 unregister_pm_notifier(&tp->pm_notifier); 4022 unregister_pm_notifier(&tp->pm_notifier);
4020#endif 4023#endif
4021 if (!test_bit(RTL8152_UNPLUG, &tp->flags)) 4024 napi_disable(&tp->napi);
4022 napi_disable(&tp->napi);
4023 clear_bit(WORK_ENABLE, &tp->flags); 4025 clear_bit(WORK_ENABLE, &tp->flags);
4024 usb_kill_urb(tp->intr_urb); 4026 usb_kill_urb(tp->intr_urb);
4025 cancel_delayed_work_sync(&tp->schedule); 4027 cancel_delayed_work_sync(&tp->schedule);
@@ -5350,7 +5352,6 @@ static int rtl8152_probe(struct usb_interface *intf,
5350 return 0; 5352 return 0;
5351 5353
5352out1: 5354out1:
5353 netif_napi_del(&tp->napi);
5354 usb_set_intfdata(intf, NULL); 5355 usb_set_intfdata(intf, NULL);
5355out: 5356out:
5356 free_netdev(netdev); 5357 free_netdev(netdev);
@@ -5365,7 +5366,6 @@ static void rtl8152_disconnect(struct usb_interface *intf)
5365 if (tp) { 5366 if (tp) {
5366 rtl_set_unplug(tp); 5367 rtl_set_unplug(tp);
5367 5368
5368 netif_napi_del(&tp->napi);
5369 unregister_netdev(tp->netdev); 5369 unregister_netdev(tp->netdev);
5370 cancel_delayed_work_sync(&tp->hw_phy_work); 5370 cancel_delayed_work_sync(&tp->hw_phy_work);
5371 tp->rtl_ops.unload(tp); 5371 tp->rtl_ops.unload(tp);
diff --git a/drivers/net/vrf.c b/drivers/net/vrf.c
index 54edf8956a25..6e84328bdd40 100644
--- a/drivers/net/vrf.c
+++ b/drivers/net/vrf.c
@@ -165,23 +165,29 @@ static int vrf_ip6_local_out(struct net *net, struct sock *sk,
165static netdev_tx_t vrf_process_v6_outbound(struct sk_buff *skb, 165static netdev_tx_t vrf_process_v6_outbound(struct sk_buff *skb,
166 struct net_device *dev) 166 struct net_device *dev)
167{ 167{
168 const struct ipv6hdr *iph = ipv6_hdr(skb); 168 const struct ipv6hdr *iph;
169 struct net *net = dev_net(skb->dev); 169 struct net *net = dev_net(skb->dev);
170 struct flowi6 fl6 = { 170 struct flowi6 fl6;
171 /* needed to match OIF rule */
172 .flowi6_oif = dev->ifindex,
173 .flowi6_iif = LOOPBACK_IFINDEX,
174 .daddr = iph->daddr,
175 .saddr = iph->saddr,
176 .flowlabel = ip6_flowinfo(iph),
177 .flowi6_mark = skb->mark,
178 .flowi6_proto = iph->nexthdr,
179 .flowi6_flags = FLOWI_FLAG_SKIP_NH_OIF,
180 };
181 int ret = NET_XMIT_DROP; 171 int ret = NET_XMIT_DROP;
182 struct dst_entry *dst; 172 struct dst_entry *dst;
183 struct dst_entry *dst_null = &net->ipv6.ip6_null_entry->dst; 173 struct dst_entry *dst_null = &net->ipv6.ip6_null_entry->dst;
184 174
175 if (!pskb_may_pull(skb, ETH_HLEN + sizeof(struct ipv6hdr)))
176 goto err;
177
178 iph = ipv6_hdr(skb);
179
180 memset(&fl6, 0, sizeof(fl6));
181 /* needed to match OIF rule */
182 fl6.flowi6_oif = dev->ifindex;
183 fl6.flowi6_iif = LOOPBACK_IFINDEX;
184 fl6.daddr = iph->daddr;
185 fl6.saddr = iph->saddr;
186 fl6.flowlabel = ip6_flowinfo(iph);
187 fl6.flowi6_mark = skb->mark;
188 fl6.flowi6_proto = iph->nexthdr;
189 fl6.flowi6_flags = FLOWI_FLAG_SKIP_NH_OIF;
190
185 dst = ip6_route_output(net, NULL, &fl6); 191 dst = ip6_route_output(net, NULL, &fl6);
186 if (dst == dst_null) 192 if (dst == dst_null)
187 goto err; 193 goto err;
@@ -237,21 +243,27 @@ static int vrf_ip_local_out(struct net *net, struct sock *sk,
237static netdev_tx_t vrf_process_v4_outbound(struct sk_buff *skb, 243static netdev_tx_t vrf_process_v4_outbound(struct sk_buff *skb,
238 struct net_device *vrf_dev) 244 struct net_device *vrf_dev)
239{ 245{
240 struct iphdr *ip4h = ip_hdr(skb); 246 struct iphdr *ip4h;
241 int ret = NET_XMIT_DROP; 247 int ret = NET_XMIT_DROP;
242 struct flowi4 fl4 = { 248 struct flowi4 fl4;
243 /* needed to match OIF rule */
244 .flowi4_oif = vrf_dev->ifindex,
245 .flowi4_iif = LOOPBACK_IFINDEX,
246 .flowi4_tos = RT_TOS(ip4h->tos),
247 .flowi4_flags = FLOWI_FLAG_ANYSRC | FLOWI_FLAG_SKIP_NH_OIF,
248 .flowi4_proto = ip4h->protocol,
249 .daddr = ip4h->daddr,
250 .saddr = ip4h->saddr,
251 };
252 struct net *net = dev_net(vrf_dev); 249 struct net *net = dev_net(vrf_dev);
253 struct rtable *rt; 250 struct rtable *rt;
254 251
252 if (!pskb_may_pull(skb, ETH_HLEN + sizeof(struct iphdr)))
253 goto err;
254
255 ip4h = ip_hdr(skb);
256
257 memset(&fl4, 0, sizeof(fl4));
258 /* needed to match OIF rule */
259 fl4.flowi4_oif = vrf_dev->ifindex;
260 fl4.flowi4_iif = LOOPBACK_IFINDEX;
261 fl4.flowi4_tos = RT_TOS(ip4h->tos);
262 fl4.flowi4_flags = FLOWI_FLAG_ANYSRC | FLOWI_FLAG_SKIP_NH_OIF;
263 fl4.flowi4_proto = ip4h->protocol;
264 fl4.daddr = ip4h->daddr;
265 fl4.saddr = ip4h->saddr;
266
255 rt = ip_route_output_flow(net, &fl4, NULL); 267 rt = ip_route_output_flow(net, &fl4, NULL);
256 if (IS_ERR(rt)) 268 if (IS_ERR(rt))
257 goto err; 269 goto err;
diff --git a/drivers/net/wan/sdla.c b/drivers/net/wan/sdla.c
index a9ac3f37b904..e2e679a01b65 100644
--- a/drivers/net/wan/sdla.c
+++ b/drivers/net/wan/sdla.c
@@ -413,6 +413,7 @@ static void sdla_errors(struct net_device *dev, int cmd, int dlci, int ret, int
413 case SDLA_RET_NO_BUFS: 413 case SDLA_RET_NO_BUFS:
414 if (cmd == SDLA_INFORMATION_WRITE) 414 if (cmd == SDLA_INFORMATION_WRITE)
415 break; 415 break;
416 /* Else, fall through */
416 417
417 default: 418 default:
418 netdev_dbg(dev, "Cmd 0x%02X generated return code 0x%02X\n", 419 netdev_dbg(dev, "Cmd 0x%02X generated return code 0x%02X\n",
diff --git a/drivers/net/wimax/i2400m/fw.c b/drivers/net/wimax/i2400m/fw.c
index e9fc168bb734..489cba9b284d 100644
--- a/drivers/net/wimax/i2400m/fw.c
+++ b/drivers/net/wimax/i2400m/fw.c
@@ -351,13 +351,15 @@ int i2400m_barker_db_init(const char *_options)
351 } 351 }
352 result = i2400m_barker_db_add(barker); 352 result = i2400m_barker_db_add(barker);
353 if (result < 0) 353 if (result < 0)
354 goto error_add; 354 goto error_parse_add;
355 } 355 }
356 kfree(options_orig); 356 kfree(options_orig);
357 } 357 }
358 return 0; 358 return 0;
359 359
360error_parse_add:
360error_parse: 361error_parse:
362 kfree(options_orig);
361error_add: 363error_add:
362 kfree(i2400m_barker_db); 364 kfree(i2400m_barker_db);
363 return result; 365 return result;
diff --git a/drivers/net/wireless/ath/wil6210/cfg80211.c b/drivers/net/wireless/ath/wil6210/cfg80211.c
index d436cc51dfd1..2fb4258941a5 100644
--- a/drivers/net/wireless/ath/wil6210/cfg80211.c
+++ b/drivers/net/wireless/ath/wil6210/cfg80211.c
@@ -177,6 +177,7 @@ static const struct wiphy_vendor_command wil_nl80211_vendor_commands[] = {
177 .info.subcmd = QCA_NL80211_VENDOR_SUBCMD_DMG_RF_GET_SECTOR_CFG, 177 .info.subcmd = QCA_NL80211_VENDOR_SUBCMD_DMG_RF_GET_SECTOR_CFG,
178 .flags = WIPHY_VENDOR_CMD_NEED_WDEV | 178 .flags = WIPHY_VENDOR_CMD_NEED_WDEV |
179 WIPHY_VENDOR_CMD_NEED_RUNNING, 179 WIPHY_VENDOR_CMD_NEED_RUNNING,
180 .policy = wil_rf_sector_policy,
180 .doit = wil_rf_sector_get_cfg 181 .doit = wil_rf_sector_get_cfg
181 }, 182 },
182 { 183 {
@@ -184,6 +185,7 @@ static const struct wiphy_vendor_command wil_nl80211_vendor_commands[] = {
184 .info.subcmd = QCA_NL80211_VENDOR_SUBCMD_DMG_RF_SET_SECTOR_CFG, 185 .info.subcmd = QCA_NL80211_VENDOR_SUBCMD_DMG_RF_SET_SECTOR_CFG,
185 .flags = WIPHY_VENDOR_CMD_NEED_WDEV | 186 .flags = WIPHY_VENDOR_CMD_NEED_WDEV |
186 WIPHY_VENDOR_CMD_NEED_RUNNING, 187 WIPHY_VENDOR_CMD_NEED_RUNNING,
188 .policy = wil_rf_sector_policy,
187 .doit = wil_rf_sector_set_cfg 189 .doit = wil_rf_sector_set_cfg
188 }, 190 },
189 { 191 {
@@ -192,6 +194,7 @@ static const struct wiphy_vendor_command wil_nl80211_vendor_commands[] = {
192 QCA_NL80211_VENDOR_SUBCMD_DMG_RF_GET_SELECTED_SECTOR, 194 QCA_NL80211_VENDOR_SUBCMD_DMG_RF_GET_SELECTED_SECTOR,
193 .flags = WIPHY_VENDOR_CMD_NEED_WDEV | 195 .flags = WIPHY_VENDOR_CMD_NEED_WDEV |
194 WIPHY_VENDOR_CMD_NEED_RUNNING, 196 WIPHY_VENDOR_CMD_NEED_RUNNING,
197 .policy = wil_rf_sector_policy,
195 .doit = wil_rf_sector_get_selected 198 .doit = wil_rf_sector_get_selected
196 }, 199 },
197 { 200 {
@@ -200,6 +203,7 @@ static const struct wiphy_vendor_command wil_nl80211_vendor_commands[] = {
200 QCA_NL80211_VENDOR_SUBCMD_DMG_RF_SET_SELECTED_SECTOR, 203 QCA_NL80211_VENDOR_SUBCMD_DMG_RF_SET_SELECTED_SECTOR,
201 .flags = WIPHY_VENDOR_CMD_NEED_WDEV | 204 .flags = WIPHY_VENDOR_CMD_NEED_WDEV |
202 WIPHY_VENDOR_CMD_NEED_RUNNING, 205 WIPHY_VENDOR_CMD_NEED_RUNNING,
206 .policy = wil_rf_sector_policy,
203 .doit = wil_rf_sector_set_selected 207 .doit = wil_rf_sector_set_selected
204 }, 208 },
205}; 209};
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/vendor.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/vendor.c
index f6500899fc14..d07e7c7355d9 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/vendor.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/vendor.c
@@ -112,6 +112,7 @@ const struct wiphy_vendor_command brcmf_vendor_cmds[] = {
112 }, 112 },
113 .flags = WIPHY_VENDOR_CMD_NEED_WDEV | 113 .flags = WIPHY_VENDOR_CMD_NEED_WDEV |
114 WIPHY_VENDOR_CMD_NEED_NETDEV, 114 WIPHY_VENDOR_CMD_NEED_NETDEV,
115 .policy = VENDOR_CMD_RAW_DATA,
115 .doit = brcmf_cfg80211_vndr_cmds_dcmd_handler 116 .doit = brcmf_cfg80211_vndr_cmds_dcmd_handler
116 }, 117 },
117}; 118};
diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/22000.c b/drivers/net/wireless/intel/iwlwifi/cfg/22000.c
index 1f500cddb3a7..55b713255b8e 100644
--- a/drivers/net/wireless/intel/iwlwifi/cfg/22000.c
+++ b/drivers/net/wireless/intel/iwlwifi/cfg/22000.c
@@ -556,6 +556,30 @@ const struct iwl_cfg killer1650i_2ax_cfg_qu_b0_hr_b0 = {
556 .max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT, 556 .max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT,
557}; 557};
558 558
559const struct iwl_cfg killer1650s_2ax_cfg_qu_c0_hr_b0 = {
560 .name = "Killer(R) Wi-Fi 6 AX1650i 160MHz Wireless Network Adapter (201NGW)",
561 .fw_name_pre = IWL_QU_C_HR_B_FW_PRE,
562 IWL_DEVICE_22500,
563 /*
564 * This device doesn't support receiving BlockAck with a large bitmap
565 * so we need to restrict the size of transmitted aggregation to the
566 * HT size; mac80211 would otherwise pick the HE max (256) by default.
567 */
568 .max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT,
569};
570
571const struct iwl_cfg killer1650i_2ax_cfg_qu_c0_hr_b0 = {
572 .name = "Killer(R) Wi-Fi 6 AX1650s 160MHz Wireless Network Adapter (201D2W)",
573 .fw_name_pre = IWL_QU_C_HR_B_FW_PRE,
574 IWL_DEVICE_22500,
575 /*
576 * This device doesn't support receiving BlockAck with a large bitmap
577 * so we need to restrict the size of transmitted aggregation to the
578 * HT size; mac80211 would otherwise pick the HE max (256) by default.
579 */
580 .max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT,
581};
582
559const struct iwl_cfg iwl22000_2ax_cfg_jf = { 583const struct iwl_cfg iwl22000_2ax_cfg_jf = {
560 .name = "Intel(R) Dual Band Wireless AX 22000", 584 .name = "Intel(R) Dual Band Wireless AX 22000",
561 .fw_name_pre = IWL_QU_B_JF_B_FW_PRE, 585 .fw_name_pre = IWL_QU_B_JF_B_FW_PRE,
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/rx.h b/drivers/net/wireless/intel/iwlwifi/fw/api/rx.h
index d55312ef58c9..9b0bb89599fc 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/rx.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/rx.h
@@ -776,7 +776,6 @@ struct iwl_rss_config_cmd {
776 u8 indirection_table[IWL_RSS_INDIRECTION_TABLE_SIZE]; 776 u8 indirection_table[IWL_RSS_INDIRECTION_TABLE_SIZE];
777} __packed; /* RSS_CONFIG_CMD_API_S_VER_1 */ 777} __packed; /* RSS_CONFIG_CMD_API_S_VER_1 */
778 778
779#define IWL_MULTI_QUEUE_SYNC_MSG_MAX_SIZE 128
780#define IWL_MULTI_QUEUE_SYNC_SENDER_POS 0 779#define IWL_MULTI_QUEUE_SYNC_SENDER_POS 0
781#define IWL_MULTI_QUEUE_SYNC_SENDER_MSK 0xf 780#define IWL_MULTI_QUEUE_SYNC_SENDER_MSK 0xf
782 781
@@ -812,10 +811,12 @@ struct iwl_rxq_sync_notification {
812 * 811 *
813 * @IWL_MVM_RXQ_EMPTY: empty sync notification 812 * @IWL_MVM_RXQ_EMPTY: empty sync notification
814 * @IWL_MVM_RXQ_NOTIF_DEL_BA: notify RSS queues of delBA 813 * @IWL_MVM_RXQ_NOTIF_DEL_BA: notify RSS queues of delBA
814 * @IWL_MVM_RXQ_NSSN_SYNC: notify all the RSS queues with the new NSSN
815 */ 815 */
816enum iwl_mvm_rxq_notif_type { 816enum iwl_mvm_rxq_notif_type {
817 IWL_MVM_RXQ_EMPTY, 817 IWL_MVM_RXQ_EMPTY,
818 IWL_MVM_RXQ_NOTIF_DEL_BA, 818 IWL_MVM_RXQ_NOTIF_DEL_BA,
819 IWL_MVM_RXQ_NSSN_SYNC,
819}; 820};
820 821
821/** 822/**
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/dbg.c b/drivers/net/wireless/intel/iwlwifi/fw/dbg.c
index e411ac98290d..4d81776f576d 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/dbg.c
+++ b/drivers/net/wireless/intel/iwlwifi/fw/dbg.c
@@ -2438,17 +2438,19 @@ static void iwl_fw_dbg_info_apply(struct iwl_fw_runtime *fwrt,
2438{ 2438{
2439 u32 img_name_len = le32_to_cpu(dbg_info->img_name_len); 2439 u32 img_name_len = le32_to_cpu(dbg_info->img_name_len);
2440 u32 dbg_cfg_name_len = le32_to_cpu(dbg_info->dbg_cfg_name_len); 2440 u32 dbg_cfg_name_len = le32_to_cpu(dbg_info->dbg_cfg_name_len);
2441 const char err_str[] =
2442 "WRT: ext=%d. Invalid %s name length %d, expected %d\n";
2443 2441
2444 if (img_name_len != IWL_FW_INI_MAX_IMG_NAME_LEN) { 2442 if (img_name_len != IWL_FW_INI_MAX_IMG_NAME_LEN) {
2445 IWL_WARN(fwrt, err_str, ext, "image", img_name_len, 2443 IWL_WARN(fwrt,
2444 "WRT: ext=%d. Invalid image name length %d, expected %d\n",
2445 ext, img_name_len,
2446 IWL_FW_INI_MAX_IMG_NAME_LEN); 2446 IWL_FW_INI_MAX_IMG_NAME_LEN);
2447 return; 2447 return;
2448 } 2448 }
2449 2449
2450 if (dbg_cfg_name_len != IWL_FW_INI_MAX_DBG_CFG_NAME_LEN) { 2450 if (dbg_cfg_name_len != IWL_FW_INI_MAX_DBG_CFG_NAME_LEN) {
2451 IWL_WARN(fwrt, err_str, ext, "debug cfg", dbg_cfg_name_len, 2451 IWL_WARN(fwrt,
2452 "WRT: ext=%d. Invalid debug cfg name length %d, expected %d\n",
2453 ext, dbg_cfg_name_len,
2452 IWL_FW_INI_MAX_DBG_CFG_NAME_LEN); 2454 IWL_FW_INI_MAX_DBG_CFG_NAME_LEN);
2453 return; 2455 return;
2454 } 2456 }
@@ -2775,8 +2777,6 @@ static void _iwl_fw_dbg_apply_point(struct iwl_fw_runtime *fwrt,
2775 struct iwl_ucode_tlv *tlv = iter; 2777 struct iwl_ucode_tlv *tlv = iter;
2776 void *ini_tlv = (void *)tlv->data; 2778 void *ini_tlv = (void *)tlv->data;
2777 u32 type = le32_to_cpu(tlv->type); 2779 u32 type = le32_to_cpu(tlv->type);
2778 const char invalid_ap_str[] =
2779 "WRT: ext=%d. Invalid apply point %d for %s\n";
2780 2780
2781 switch (type) { 2781 switch (type) {
2782 case IWL_UCODE_TLV_TYPE_DEBUG_INFO: 2782 case IWL_UCODE_TLV_TYPE_DEBUG_INFO:
@@ -2786,8 +2786,9 @@ static void _iwl_fw_dbg_apply_point(struct iwl_fw_runtime *fwrt,
2786 struct iwl_fw_ini_allocation_data *buf_alloc = ini_tlv; 2786 struct iwl_fw_ini_allocation_data *buf_alloc = ini_tlv;
2787 2787
2788 if (pnt != IWL_FW_INI_APPLY_EARLY) { 2788 if (pnt != IWL_FW_INI_APPLY_EARLY) {
2789 IWL_ERR(fwrt, invalid_ap_str, ext, pnt, 2789 IWL_ERR(fwrt,
2790 "buffer allocation"); 2790 "WRT: ext=%d. Invalid apply point %d for buffer allocation\n",
2791 ext, pnt);
2791 goto next; 2792 goto next;
2792 } 2793 }
2793 2794
@@ -2797,8 +2798,9 @@ static void _iwl_fw_dbg_apply_point(struct iwl_fw_runtime *fwrt,
2797 } 2798 }
2798 case IWL_UCODE_TLV_TYPE_HCMD: 2799 case IWL_UCODE_TLV_TYPE_HCMD:
2799 if (pnt < IWL_FW_INI_APPLY_AFTER_ALIVE) { 2800 if (pnt < IWL_FW_INI_APPLY_AFTER_ALIVE) {
2800 IWL_ERR(fwrt, invalid_ap_str, ext, pnt, 2801 IWL_ERR(fwrt,
2801 "host command"); 2802 "WRT: ext=%d. Invalid apply point %d for host command\n",
2803 ext, pnt);
2802 goto next; 2804 goto next;
2803 } 2805 }
2804 iwl_fw_dbg_send_hcmd(fwrt, tlv, ext); 2806 iwl_fw_dbg_send_hcmd(fwrt, tlv, ext);
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-config.h b/drivers/net/wireless/intel/iwlwifi/iwl-config.h
index 1c1bf1b281cd..6c04f8223aff 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-config.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-config.h
@@ -577,6 +577,8 @@ extern const struct iwl_cfg iwl_ax1650i_cfg_quz_hr;
577extern const struct iwl_cfg iwl_ax1650s_cfg_quz_hr; 577extern const struct iwl_cfg iwl_ax1650s_cfg_quz_hr;
578extern const struct iwl_cfg killer1650s_2ax_cfg_qu_b0_hr_b0; 578extern const struct iwl_cfg killer1650s_2ax_cfg_qu_b0_hr_b0;
579extern const struct iwl_cfg killer1650i_2ax_cfg_qu_b0_hr_b0; 579extern const struct iwl_cfg killer1650i_2ax_cfg_qu_b0_hr_b0;
580extern const struct iwl_cfg killer1650s_2ax_cfg_qu_c0_hr_b0;
581extern const struct iwl_cfg killer1650i_2ax_cfg_qu_c0_hr_b0;
580extern const struct iwl_cfg killer1650x_2ax_cfg; 582extern const struct iwl_cfg killer1650x_2ax_cfg;
581extern const struct iwl_cfg killer1650w_2ax_cfg; 583extern const struct iwl_cfg killer1650w_2ax_cfg;
582extern const struct iwl_cfg iwl9461_2ac_cfg_qu_b0_jf_b0; 584extern const struct iwl_cfg iwl9461_2ac_cfg_qu_b0_jf_b0;
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c
index 57d09049e615..38672dd5aae9 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c
@@ -1640,6 +1640,8 @@ struct iwl_drv *iwl_drv_start(struct iwl_trans *trans)
1640 init_completion(&drv->request_firmware_complete); 1640 init_completion(&drv->request_firmware_complete);
1641 INIT_LIST_HEAD(&drv->list); 1641 INIT_LIST_HEAD(&drv->list);
1642 1642
1643 iwl_load_fw_dbg_tlv(drv->trans->dev, drv->trans);
1644
1643#ifdef CONFIG_IWLWIFI_DEBUGFS 1645#ifdef CONFIG_IWLWIFI_DEBUGFS
1644 /* Create the device debugfs entries. */ 1646 /* Create the device debugfs entries. */
1645 drv->dbgfs_drv = debugfs_create_dir(dev_name(trans->dev), 1647 drv->dbgfs_drv = debugfs_create_dir(dev_name(trans->dev),
@@ -1660,8 +1662,8 @@ struct iwl_drv *iwl_drv_start(struct iwl_trans *trans)
1660err_fw: 1662err_fw:
1661#ifdef CONFIG_IWLWIFI_DEBUGFS 1663#ifdef CONFIG_IWLWIFI_DEBUGFS
1662 debugfs_remove_recursive(drv->dbgfs_drv); 1664 debugfs_remove_recursive(drv->dbgfs_drv);
1663 iwl_fw_dbg_free(drv->trans);
1664#endif 1665#endif
1666 iwl_fw_dbg_free(drv->trans);
1665 kfree(drv); 1667 kfree(drv);
1666err: 1668err:
1667 return ERR_PTR(ret); 1669 return ERR_PTR(ret);
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
index 1d608e9e9101..5de54d1559dd 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
@@ -755,7 +755,7 @@ static int iwl_mvm_sar_get_ewrd_table(struct iwl_mvm *mvm)
755 755
756 for (i = 0; i < n_profiles; i++) { 756 for (i = 0; i < n_profiles; i++) {
757 /* the tables start at element 3 */ 757 /* the tables start at element 3 */
758 static int pos = 3; 758 int pos = 3;
759 759
760 /* The EWRD profiles officially go from 2 to 4, but we 760 /* The EWRD profiles officially go from 2 to 4, but we
761 * save them in sar_profiles[1-3] (because we don't 761 * save them in sar_profiles[1-3] (because we don't
@@ -880,6 +880,22 @@ int iwl_mvm_sar_select_profile(struct iwl_mvm *mvm, int prof_a, int prof_b)
880 return iwl_mvm_send_cmd_pdu(mvm, REDUCE_TX_POWER_CMD, 0, len, &cmd); 880 return iwl_mvm_send_cmd_pdu(mvm, REDUCE_TX_POWER_CMD, 0, len, &cmd);
881} 881}
882 882
883static bool iwl_mvm_sar_geo_support(struct iwl_mvm *mvm)
884{
885 /*
886 * The GEO_TX_POWER_LIMIT command is not supported on earlier
887 * firmware versions. Unfortunately, we don't have a TLV API
888 * flag to rely on, so rely on the major version which is in
889 * the first byte of ucode_ver. This was implemented
890 * initially on version 38 and then backported to 36, 29 and
891 * 17.
892 */
893 return IWL_UCODE_SERIAL(mvm->fw->ucode_ver) >= 38 ||
894 IWL_UCODE_SERIAL(mvm->fw->ucode_ver) == 36 ||
895 IWL_UCODE_SERIAL(mvm->fw->ucode_ver) == 29 ||
896 IWL_UCODE_SERIAL(mvm->fw->ucode_ver) == 17;
897}
898
883int iwl_mvm_get_sar_geo_profile(struct iwl_mvm *mvm) 899int iwl_mvm_get_sar_geo_profile(struct iwl_mvm *mvm)
884{ 900{
885 struct iwl_geo_tx_power_profiles_resp *resp; 901 struct iwl_geo_tx_power_profiles_resp *resp;
@@ -909,6 +925,9 @@ int iwl_mvm_get_sar_geo_profile(struct iwl_mvm *mvm)
909 .data = { data }, 925 .data = { data },
910 }; 926 };
911 927
928 if (!iwl_mvm_sar_geo_support(mvm))
929 return -EOPNOTSUPP;
930
912 ret = iwl_mvm_send_cmd(mvm, &cmd); 931 ret = iwl_mvm_send_cmd(mvm, &cmd);
913 if (ret) { 932 if (ret) {
914 IWL_ERR(mvm, "Failed to get geographic profile info %d\n", ret); 933 IWL_ERR(mvm, "Failed to get geographic profile info %d\n", ret);
@@ -934,13 +953,7 @@ static int iwl_mvm_sar_geo_init(struct iwl_mvm *mvm)
934 int ret, i, j; 953 int ret, i, j;
935 u16 cmd_wide_id = WIDE_ID(PHY_OPS_GROUP, GEO_TX_POWER_LIMIT); 954 u16 cmd_wide_id = WIDE_ID(PHY_OPS_GROUP, GEO_TX_POWER_LIMIT);
936 955
937 /* 956 if (!iwl_mvm_sar_geo_support(mvm))
938 * This command is not supported on earlier firmware versions.
939 * Unfortunately, we don't have a TLV API flag to rely on, so
940 * rely on the major version which is in the first byte of
941 * ucode_ver.
942 */
943 if (IWL_UCODE_SERIAL(mvm->fw->ucode_ver) < 41)
944 return 0; 957 return 0;
945 958
946 ret = iwl_mvm_sar_get_wgds_table(mvm); 959 ret = iwl_mvm_sar_get_wgds_table(mvm);
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c
index cb22d447fcb8..fe776e35b9d0 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c
@@ -554,7 +554,7 @@ static void iwl_mvm_mac_ctxt_cmd_common(struct iwl_mvm *mvm,
554 cpu_to_le32(vif->bss_conf.use_short_slot ? 554 cpu_to_le32(vif->bss_conf.use_short_slot ?
555 MAC_FLG_SHORT_SLOT : 0); 555 MAC_FLG_SHORT_SLOT : 0);
556 556
557 cmd->filter_flags = cpu_to_le32(MAC_FILTER_ACCEPT_GRP); 557 cmd->filter_flags = 0;
558 558
559 for (i = 0; i < IEEE80211_NUM_ACS; i++) { 559 for (i = 0; i < IEEE80211_NUM_ACS; i++) {
560 u8 txf = iwl_mvm_mac_ac_to_tx_fifo(mvm, i); 560 u8 txf = iwl_mvm_mac_ac_to_tx_fifo(mvm, i);
@@ -623,6 +623,8 @@ static int iwl_mvm_mac_ctxt_cmd_sta(struct iwl_mvm *mvm,
623 /* We need the dtim_period to set the MAC as associated */ 623 /* We need the dtim_period to set the MAC as associated */
624 if (vif->bss_conf.assoc && vif->bss_conf.dtim_period && 624 if (vif->bss_conf.assoc && vif->bss_conf.dtim_period &&
625 !force_assoc_off) { 625 !force_assoc_off) {
626 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
627 u8 ap_sta_id = mvmvif->ap_sta_id;
626 u32 dtim_offs; 628 u32 dtim_offs;
627 629
628 /* 630 /*
@@ -658,6 +660,29 @@ static int iwl_mvm_mac_ctxt_cmd_sta(struct iwl_mvm *mvm,
658 dtim_offs); 660 dtim_offs);
659 661
660 ctxt_sta->is_assoc = cpu_to_le32(1); 662 ctxt_sta->is_assoc = cpu_to_le32(1);
663
664 /*
665 * allow multicast data frames only as long as the station is
666 * authorized, i.e., GTK keys are already installed (if needed)
667 */
668 if (ap_sta_id < IWL_MVM_STATION_COUNT) {
669 struct ieee80211_sta *sta;
670
671 rcu_read_lock();
672
673 sta = rcu_dereference(mvm->fw_id_to_mac_id[ap_sta_id]);
674 if (!IS_ERR_OR_NULL(sta)) {
675 struct iwl_mvm_sta *mvmsta =
676 iwl_mvm_sta_from_mac80211(sta);
677
678 if (mvmsta->sta_state ==
679 IEEE80211_STA_AUTHORIZED)
680 cmd.filter_flags |=
681 cpu_to_le32(MAC_FILTER_ACCEPT_GRP);
682 }
683
684 rcu_read_unlock();
685 }
661 } else { 686 } else {
662 ctxt_sta->is_assoc = cpu_to_le32(0); 687 ctxt_sta->is_assoc = cpu_to_le32(0);
663 688
@@ -703,7 +728,8 @@ static int iwl_mvm_mac_ctxt_cmd_listener(struct iwl_mvm *mvm,
703 MAC_FILTER_IN_CONTROL_AND_MGMT | 728 MAC_FILTER_IN_CONTROL_AND_MGMT |
704 MAC_FILTER_IN_BEACON | 729 MAC_FILTER_IN_BEACON |
705 MAC_FILTER_IN_PROBE_REQUEST | 730 MAC_FILTER_IN_PROBE_REQUEST |
706 MAC_FILTER_IN_CRC32); 731 MAC_FILTER_IN_CRC32 |
732 MAC_FILTER_ACCEPT_GRP);
707 ieee80211_hw_set(mvm->hw, RX_INCLUDES_FCS); 733 ieee80211_hw_set(mvm->hw, RX_INCLUDES_FCS);
708 734
709 /* Allocate sniffer station */ 735 /* Allocate sniffer station */
@@ -727,7 +753,8 @@ static int iwl_mvm_mac_ctxt_cmd_ibss(struct iwl_mvm *mvm,
727 iwl_mvm_mac_ctxt_cmd_common(mvm, vif, &cmd, NULL, action); 753 iwl_mvm_mac_ctxt_cmd_common(mvm, vif, &cmd, NULL, action);
728 754
729 cmd.filter_flags = cpu_to_le32(MAC_FILTER_IN_BEACON | 755 cmd.filter_flags = cpu_to_le32(MAC_FILTER_IN_BEACON |
730 MAC_FILTER_IN_PROBE_REQUEST); 756 MAC_FILTER_IN_PROBE_REQUEST |
757 MAC_FILTER_ACCEPT_GRP);
731 758
732 /* cmd.ibss.beacon_time/cmd.ibss.beacon_tsf are curently ignored */ 759 /* cmd.ibss.beacon_time/cmd.ibss.beacon_tsf are curently ignored */
733 cmd.ibss.bi = cpu_to_le32(vif->bss_conf.beacon_int); 760 cmd.ibss.bi = cpu_to_le32(vif->bss_conf.beacon_int);
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
index 55cd49ccbf0b..a7bc00d1296f 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
@@ -207,11 +207,11 @@ static const struct cfg80211_pmsr_capabilities iwl_mvm_pmsr_capa = {
207 }, 207 },
208}; 208};
209 209
210static int iwl_mvm_mac_set_key(struct ieee80211_hw *hw, 210static int __iwl_mvm_mac_set_key(struct ieee80211_hw *hw,
211 enum set_key_cmd cmd, 211 enum set_key_cmd cmd,
212 struct ieee80211_vif *vif, 212 struct ieee80211_vif *vif,
213 struct ieee80211_sta *sta, 213 struct ieee80211_sta *sta,
214 struct ieee80211_key_conf *key); 214 struct ieee80211_key_conf *key);
215 215
216void iwl_mvm_ref(struct iwl_mvm *mvm, enum iwl_mvm_ref_type ref_type) 216void iwl_mvm_ref(struct iwl_mvm *mvm, enum iwl_mvm_ref_type ref_type)
217{ 217{
@@ -474,7 +474,19 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
474 ieee80211_hw_set(hw, SUPPORTS_VHT_EXT_NSS_BW); 474 ieee80211_hw_set(hw, SUPPORTS_VHT_EXT_NSS_BW);
475 ieee80211_hw_set(hw, BUFF_MMPDU_TXQ); 475 ieee80211_hw_set(hw, BUFF_MMPDU_TXQ);
476 ieee80211_hw_set(hw, STA_MMPDU_TXQ); 476 ieee80211_hw_set(hw, STA_MMPDU_TXQ);
477 ieee80211_hw_set(hw, TX_AMSDU); 477 /*
478 * On older devices, enabling TX A-MSDU occasionally leads to
479 * something getting messed up, the command read from the FIFO
480 * gets out of sync and isn't a TX command, so that we have an
481 * assert EDC.
482 *
483 * It's not clear where the bug is, but since we didn't used to
484 * support A-MSDU until moving the mac80211 iTXQs, just leave it
485 * for older devices. We also don't see this issue on any newer
486 * devices.
487 */
488 if (mvm->cfg->device_family >= IWL_DEVICE_FAMILY_9000)
489 ieee80211_hw_set(hw, TX_AMSDU);
478 ieee80211_hw_set(hw, TX_FRAG_LIST); 490 ieee80211_hw_set(hw, TX_FRAG_LIST);
479 491
480 if (iwl_mvm_has_tlc_offload(mvm)) { 492 if (iwl_mvm_has_tlc_offload(mvm)) {
@@ -2726,7 +2738,7 @@ static int iwl_mvm_start_ap_ibss(struct ieee80211_hw *hw,
2726 2738
2727 mvmvif->ap_early_keys[i] = NULL; 2739 mvmvif->ap_early_keys[i] = NULL;
2728 2740
2729 ret = iwl_mvm_mac_set_key(hw, SET_KEY, vif, NULL, key); 2741 ret = __iwl_mvm_mac_set_key(hw, SET_KEY, vif, NULL, key);
2730 if (ret) 2742 if (ret)
2731 goto out_quota_failed; 2743 goto out_quota_failed;
2732 } 2744 }
@@ -3315,10 +3327,20 @@ static int iwl_mvm_mac_sta_state(struct ieee80211_hw *hw,
3315 /* enable beacon filtering */ 3327 /* enable beacon filtering */
3316 WARN_ON(iwl_mvm_enable_beacon_filter(mvm, vif, 0)); 3328 WARN_ON(iwl_mvm_enable_beacon_filter(mvm, vif, 0));
3317 3329
3330 /*
3331 * Now that the station is authorized, i.e., keys were already
3332 * installed, need to indicate to the FW that
3333 * multicast data frames can be forwarded to the driver
3334 */
3335 iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL);
3336
3318 iwl_mvm_rs_rate_init(mvm, sta, mvmvif->phy_ctxt->channel->band, 3337 iwl_mvm_rs_rate_init(mvm, sta, mvmvif->phy_ctxt->channel->band,
3319 true); 3338 true);
3320 } else if (old_state == IEEE80211_STA_AUTHORIZED && 3339 } else if (old_state == IEEE80211_STA_AUTHORIZED &&
3321 new_state == IEEE80211_STA_ASSOC) { 3340 new_state == IEEE80211_STA_ASSOC) {
3341 /* Multicast data frames are no longer allowed */
3342 iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL);
3343
3322 /* disable beacon filtering */ 3344 /* disable beacon filtering */
3323 ret = iwl_mvm_disable_beacon_filter(mvm, vif, 0); 3345 ret = iwl_mvm_disable_beacon_filter(mvm, vif, 0);
3324 WARN_ON(ret && 3346 WARN_ON(ret &&
@@ -3494,11 +3516,11 @@ static int iwl_mvm_mac_sched_scan_stop(struct ieee80211_hw *hw,
3494 return ret; 3516 return ret;
3495} 3517}
3496 3518
3497static int iwl_mvm_mac_set_key(struct ieee80211_hw *hw, 3519static int __iwl_mvm_mac_set_key(struct ieee80211_hw *hw,
3498 enum set_key_cmd cmd, 3520 enum set_key_cmd cmd,
3499 struct ieee80211_vif *vif, 3521 struct ieee80211_vif *vif,
3500 struct ieee80211_sta *sta, 3522 struct ieee80211_sta *sta,
3501 struct ieee80211_key_conf *key) 3523 struct ieee80211_key_conf *key)
3502{ 3524{
3503 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); 3525 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
3504 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); 3526 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
@@ -3553,8 +3575,6 @@ static int iwl_mvm_mac_set_key(struct ieee80211_hw *hw,
3553 return -EOPNOTSUPP; 3575 return -EOPNOTSUPP;
3554 } 3576 }
3555 3577
3556 mutex_lock(&mvm->mutex);
3557
3558 switch (cmd) { 3578 switch (cmd) {
3559 case SET_KEY: 3579 case SET_KEY:
3560 if ((vif->type == NL80211_IFTYPE_ADHOC || 3580 if ((vif->type == NL80211_IFTYPE_ADHOC ||
@@ -3700,7 +3720,22 @@ static int iwl_mvm_mac_set_key(struct ieee80211_hw *hw,
3700 ret = -EINVAL; 3720 ret = -EINVAL;
3701 } 3721 }
3702 3722
3723 return ret;
3724}
3725
3726static int iwl_mvm_mac_set_key(struct ieee80211_hw *hw,
3727 enum set_key_cmd cmd,
3728 struct ieee80211_vif *vif,
3729 struct ieee80211_sta *sta,
3730 struct ieee80211_key_conf *key)
3731{
3732 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
3733 int ret;
3734
3735 mutex_lock(&mvm->mutex);
3736 ret = __iwl_mvm_mac_set_key(hw, cmd, vif, sta, key);
3703 mutex_unlock(&mvm->mutex); 3737 mutex_unlock(&mvm->mutex);
3738
3704 return ret; 3739 return ret;
3705} 3740}
3706 3741
@@ -5041,7 +5076,6 @@ void iwl_mvm_sync_rx_queues_internal(struct iwl_mvm *mvm,
5041 u32 qmask = BIT(mvm->trans->num_rx_queues) - 1; 5076 u32 qmask = BIT(mvm->trans->num_rx_queues) - 1;
5042 int ret; 5077 int ret;
5043 5078
5044 lockdep_assert_held(&mvm->mutex);
5045 5079
5046 if (!iwl_mvm_has_new_rx_api(mvm)) 5080 if (!iwl_mvm_has_new_rx_api(mvm))
5047 return; 5081 return;
@@ -5052,13 +5086,15 @@ void iwl_mvm_sync_rx_queues_internal(struct iwl_mvm *mvm,
5052 atomic_set(&mvm->queue_sync_counter, 5086 atomic_set(&mvm->queue_sync_counter,
5053 mvm->trans->num_rx_queues); 5087 mvm->trans->num_rx_queues);
5054 5088
5055 ret = iwl_mvm_notify_rx_queue(mvm, qmask, (u8 *)notif, size); 5089 ret = iwl_mvm_notify_rx_queue(mvm, qmask, (u8 *)notif,
5090 size, !notif->sync);
5056 if (ret) { 5091 if (ret) {
5057 IWL_ERR(mvm, "Failed to trigger RX queues sync (%d)\n", ret); 5092 IWL_ERR(mvm, "Failed to trigger RX queues sync (%d)\n", ret);
5058 goto out; 5093 goto out;
5059 } 5094 }
5060 5095
5061 if (notif->sync) { 5096 if (notif->sync) {
5097 lockdep_assert_held(&mvm->mutex);
5062 ret = wait_event_timeout(mvm->rx_sync_waitq, 5098 ret = wait_event_timeout(mvm->rx_sync_waitq,
5063 atomic_read(&mvm->queue_sync_counter) == 0 || 5099 atomic_read(&mvm->queue_sync_counter) == 0 ||
5064 iwl_mvm_is_radio_killed(mvm), 5100 iwl_mvm_is_radio_killed(mvm),
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
index 48c77af54e99..a263cc629d75 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
@@ -1664,9 +1664,9 @@ void iwl_mvm_rx_monitor_no_data(struct iwl_mvm *mvm, struct napi_struct *napi,
1664void iwl_mvm_rx_frame_release(struct iwl_mvm *mvm, struct napi_struct *napi, 1664void iwl_mvm_rx_frame_release(struct iwl_mvm *mvm, struct napi_struct *napi,
1665 struct iwl_rx_cmd_buffer *rxb, int queue); 1665 struct iwl_rx_cmd_buffer *rxb, int queue);
1666int iwl_mvm_notify_rx_queue(struct iwl_mvm *mvm, u32 rxq_mask, 1666int iwl_mvm_notify_rx_queue(struct iwl_mvm *mvm, u32 rxq_mask,
1667 const u8 *data, u32 count); 1667 const u8 *data, u32 count, bool async);
1668void iwl_mvm_rx_queue_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb, 1668void iwl_mvm_rx_queue_notif(struct iwl_mvm *mvm, struct napi_struct *napi,
1669 int queue); 1669 struct iwl_rx_cmd_buffer *rxb, int queue);
1670void iwl_mvm_rx_tx_cmd(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb); 1670void iwl_mvm_rx_tx_cmd(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb);
1671void iwl_mvm_mfu_assert_dump_notif(struct iwl_mvm *mvm, 1671void iwl_mvm_mfu_assert_dump_notif(struct iwl_mvm *mvm,
1672 struct iwl_rx_cmd_buffer *rxb); 1672 struct iwl_rx_cmd_buffer *rxb);
@@ -1813,7 +1813,7 @@ iwl_mvm_vif_dbgfs_clean(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
1813#endif /* CONFIG_IWLWIFI_DEBUGFS */ 1813#endif /* CONFIG_IWLWIFI_DEBUGFS */
1814 1814
1815/* rate scaling */ 1815/* rate scaling */
1816int iwl_mvm_send_lq_cmd(struct iwl_mvm *mvm, struct iwl_lq_cmd *lq, bool sync); 1816int iwl_mvm_send_lq_cmd(struct iwl_mvm *mvm, struct iwl_lq_cmd *lq);
1817void iwl_mvm_update_frame_stats(struct iwl_mvm *mvm, u32 rate, bool agg); 1817void iwl_mvm_update_frame_stats(struct iwl_mvm *mvm, u32 rate, bool agg);
1818int rs_pretty_print_rate(char *buf, int bufsz, const u32 rate); 1818int rs_pretty_print_rate(char *buf, int bufsz, const u32 rate);
1819void rs_update_last_rssi(struct iwl_mvm *mvm, 1819void rs_update_last_rssi(struct iwl_mvm *mvm,
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c b/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c
index 719f793b3487..a9bb43a2f27b 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c
@@ -620,7 +620,7 @@ void iwl_mvm_rx_chub_update_mcc(struct iwl_mvm *mvm,
620 enum iwl_mcc_source src; 620 enum iwl_mcc_source src;
621 char mcc[3]; 621 char mcc[3];
622 struct ieee80211_regdomain *regd; 622 struct ieee80211_regdomain *regd;
623 u32 wgds_tbl_idx; 623 int wgds_tbl_idx;
624 624
625 lockdep_assert_held(&mvm->mutex); 625 lockdep_assert_held(&mvm->mutex);
626 626
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
index d7d6f3398f86..4888054dc3d8 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
@@ -1088,7 +1088,7 @@ static void iwl_mvm_rx_mq(struct iwl_op_mode *op_mode,
1088 iwl_mvm_rx_mpdu_mq(mvm, napi, rxb, 0); 1088 iwl_mvm_rx_mpdu_mq(mvm, napi, rxb, 0);
1089 else if (unlikely(cmd == WIDE_ID(DATA_PATH_GROUP, 1089 else if (unlikely(cmd == WIDE_ID(DATA_PATH_GROUP,
1090 RX_QUEUES_NOTIFICATION))) 1090 RX_QUEUES_NOTIFICATION)))
1091 iwl_mvm_rx_queue_notif(mvm, rxb, 0); 1091 iwl_mvm_rx_queue_notif(mvm, napi, rxb, 0);
1092 else if (cmd == WIDE_ID(LEGACY_GROUP, FRAME_RELEASE)) 1092 else if (cmd == WIDE_ID(LEGACY_GROUP, FRAME_RELEASE))
1093 iwl_mvm_rx_frame_release(mvm, napi, rxb, 0); 1093 iwl_mvm_rx_frame_release(mvm, napi, rxb, 0);
1094 else if (cmd == WIDE_ID(DATA_PATH_GROUP, RX_NO_DATA_NOTIF)) 1094 else if (cmd == WIDE_ID(DATA_PATH_GROUP, RX_NO_DATA_NOTIF))
@@ -1812,7 +1812,7 @@ static void iwl_mvm_rx_mq_rss(struct iwl_op_mode *op_mode,
1812 iwl_mvm_rx_frame_release(mvm, napi, rxb, queue); 1812 iwl_mvm_rx_frame_release(mvm, napi, rxb, queue);
1813 else if (unlikely(cmd == WIDE_ID(DATA_PATH_GROUP, 1813 else if (unlikely(cmd == WIDE_ID(DATA_PATH_GROUP,
1814 RX_QUEUES_NOTIFICATION))) 1814 RX_QUEUES_NOTIFICATION)))
1815 iwl_mvm_rx_queue_notif(mvm, rxb, queue); 1815 iwl_mvm_rx_queue_notif(mvm, napi, rxb, queue);
1816 else if (likely(cmd == WIDE_ID(LEGACY_GROUP, REPLY_RX_MPDU_CMD))) 1816 else if (likely(cmd == WIDE_ID(LEGACY_GROUP, REPLY_RX_MPDU_CMD)))
1817 iwl_mvm_rx_mpdu_mq(mvm, napi, rxb, queue); 1817 iwl_mvm_rx_mpdu_mq(mvm, napi, rxb, queue);
1818} 1818}
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rs.c b/drivers/net/wireless/intel/iwlwifi/mvm/rs.c
index 8c9069f28a58..d3f04acfbacb 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/rs.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/rs.c
@@ -1197,239 +1197,6 @@ static u8 rs_get_tid(struct ieee80211_hdr *hdr)
1197 return tid; 1197 return tid;
1198} 1198}
1199 1199
1200void iwl_mvm_rs_tx_status(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
1201 int tid, struct ieee80211_tx_info *info, bool ndp)
1202{
1203 int legacy_success;
1204 int retries;
1205 int i;
1206 struct iwl_lq_cmd *table;
1207 u32 lq_hwrate;
1208 struct rs_rate lq_rate, tx_resp_rate;
1209 struct iwl_scale_tbl_info *curr_tbl, *other_tbl, *tmp_tbl;
1210 u32 tlc_info = (uintptr_t)info->status.status_driver_data[0];
1211 u8 reduced_txp = tlc_info & RS_DRV_DATA_TXP_MSK;
1212 u8 lq_color = RS_DRV_DATA_LQ_COLOR_GET(tlc_info);
1213 u32 tx_resp_hwrate = (uintptr_t)info->status.status_driver_data[1];
1214 struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
1215 struct iwl_lq_sta *lq_sta = &mvmsta->lq_sta.rs_drv;
1216
1217 /* Treat uninitialized rate scaling data same as non-existing. */
1218 if (!lq_sta) {
1219 IWL_DEBUG_RATE(mvm, "Station rate scaling not created yet.\n");
1220 return;
1221 } else if (!lq_sta->pers.drv) {
1222 IWL_DEBUG_RATE(mvm, "Rate scaling not initialized yet.\n");
1223 return;
1224 }
1225
1226 /* This packet was aggregated but doesn't carry status info */
1227 if ((info->flags & IEEE80211_TX_CTL_AMPDU) &&
1228 !(info->flags & IEEE80211_TX_STAT_AMPDU))
1229 return;
1230
1231 if (rs_rate_from_ucode_rate(tx_resp_hwrate, info->band,
1232 &tx_resp_rate)) {
1233 WARN_ON_ONCE(1);
1234 return;
1235 }
1236
1237#ifdef CONFIG_MAC80211_DEBUGFS
1238 /* Disable last tx check if we are debugging with fixed rate but
1239 * update tx stats */
1240 if (lq_sta->pers.dbg_fixed_rate) {
1241 int index = tx_resp_rate.index;
1242 enum rs_column column;
1243 int attempts, success;
1244
1245 column = rs_get_column_from_rate(&tx_resp_rate);
1246 if (WARN_ONCE(column == RS_COLUMN_INVALID,
1247 "Can't map rate 0x%x to column",
1248 tx_resp_hwrate))
1249 return;
1250
1251 if (info->flags & IEEE80211_TX_STAT_AMPDU) {
1252 attempts = info->status.ampdu_len;
1253 success = info->status.ampdu_ack_len;
1254 } else {
1255 attempts = info->status.rates[0].count;
1256 success = !!(info->flags & IEEE80211_TX_STAT_ACK);
1257 }
1258
1259 lq_sta->pers.tx_stats[column][index].total += attempts;
1260 lq_sta->pers.tx_stats[column][index].success += success;
1261
1262 IWL_DEBUG_RATE(mvm, "Fixed rate 0x%x success %d attempts %d\n",
1263 tx_resp_hwrate, success, attempts);
1264 return;
1265 }
1266#endif
1267
1268 if (time_after(jiffies,
1269 (unsigned long)(lq_sta->last_tx +
1270 (IWL_MVM_RS_IDLE_TIMEOUT * HZ)))) {
1271 IWL_DEBUG_RATE(mvm, "Tx idle for too long. reinit rs\n");
1272 iwl_mvm_rs_rate_init(mvm, sta, info->band, true);
1273 return;
1274 }
1275 lq_sta->last_tx = jiffies;
1276
1277 /* Ignore this Tx frame response if its initial rate doesn't match
1278 * that of latest Link Quality command. There may be stragglers
1279 * from a previous Link Quality command, but we're no longer interested
1280 * in those; they're either from the "active" mode while we're trying
1281 * to check "search" mode, or a prior "search" mode after we've moved
1282 * to a new "search" mode (which might become the new "active" mode).
1283 */
1284 table = &lq_sta->lq;
1285 lq_hwrate = le32_to_cpu(table->rs_table[0]);
1286 if (rs_rate_from_ucode_rate(lq_hwrate, info->band, &lq_rate)) {
1287 WARN_ON_ONCE(1);
1288 return;
1289 }
1290
1291 /* Here we actually compare this rate to the latest LQ command */
1292 if (lq_color != LQ_FLAG_COLOR_GET(table->flags)) {
1293 IWL_DEBUG_RATE(mvm,
1294 "tx resp color 0x%x does not match 0x%x\n",
1295 lq_color, LQ_FLAG_COLOR_GET(table->flags));
1296
1297 /*
1298 * Since rates mis-match, the last LQ command may have failed.
1299 * After IWL_MISSED_RATE_MAX mis-matches, resync the uCode with
1300 * ... driver.
1301 */
1302 lq_sta->missed_rate_counter++;
1303 if (lq_sta->missed_rate_counter > IWL_MVM_RS_MISSED_RATE_MAX) {
1304 lq_sta->missed_rate_counter = 0;
1305 IWL_DEBUG_RATE(mvm,
1306 "Too many rates mismatch. Send sync LQ. rs_state %d\n",
1307 lq_sta->rs_state);
1308 iwl_mvm_send_lq_cmd(mvm, &lq_sta->lq, false);
1309 }
1310 /* Regardless, ignore this status info for outdated rate */
1311 return;
1312 } else
1313 /* Rate did match, so reset the missed_rate_counter */
1314 lq_sta->missed_rate_counter = 0;
1315
1316 if (!lq_sta->search_better_tbl) {
1317 curr_tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
1318 other_tbl = &(lq_sta->lq_info[1 - lq_sta->active_tbl]);
1319 } else {
1320 curr_tbl = &(lq_sta->lq_info[1 - lq_sta->active_tbl]);
1321 other_tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
1322 }
1323
1324 if (WARN_ON_ONCE(!rs_rate_column_match(&lq_rate, &curr_tbl->rate))) {
1325 IWL_DEBUG_RATE(mvm,
1326 "Neither active nor search matches tx rate\n");
1327 tmp_tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
1328 rs_dump_rate(mvm, &tmp_tbl->rate, "ACTIVE");
1329 tmp_tbl = &(lq_sta->lq_info[1 - lq_sta->active_tbl]);
1330 rs_dump_rate(mvm, &tmp_tbl->rate, "SEARCH");
1331 rs_dump_rate(mvm, &lq_rate, "ACTUAL");
1332
1333 /*
1334 * no matching table found, let's by-pass the data collection
1335 * and continue to perform rate scale to find the rate table
1336 */
1337 rs_stay_in_table(lq_sta, true);
1338 goto done;
1339 }
1340
1341 /*
1342 * Updating the frame history depends on whether packets were
1343 * aggregated.
1344 *
1345 * For aggregation, all packets were transmitted at the same rate, the
1346 * first index into rate scale table.
1347 */
1348 if (info->flags & IEEE80211_TX_STAT_AMPDU) {
1349 rs_collect_tpc_data(mvm, lq_sta, curr_tbl, tx_resp_rate.index,
1350 info->status.ampdu_len,
1351 info->status.ampdu_ack_len,
1352 reduced_txp);
1353
1354 /* ampdu_ack_len = 0 marks no BA was received. For TLC, treat
1355 * it as a single frame loss as we don't want the success ratio
1356 * to dip too quickly because a BA wasn't received.
1357 * For TPC, there's no need for this optimisation since we want
1358 * to recover very quickly from a bad power reduction and,
1359 * therefore we'd like the success ratio to get an immediate hit
1360 * when failing to get a BA, so we'd switch back to a lower or
1361 * zero power reduction. When FW transmits agg with a rate
1362 * different from the initial rate, it will not use reduced txp
1363 * and will send BA notification twice (one empty with reduced
1364 * txp equal to the value from LQ and one with reduced txp 0).
1365 * We need to update counters for each txp level accordingly.
1366 */
1367 if (info->status.ampdu_ack_len == 0)
1368 info->status.ampdu_len = 1;
1369
1370 rs_collect_tlc_data(mvm, mvmsta, tid, curr_tbl, tx_resp_rate.index,
1371 info->status.ampdu_len,
1372 info->status.ampdu_ack_len);
1373
1374 /* Update success/fail counts if not searching for new mode */
1375 if (lq_sta->rs_state == RS_STATE_STAY_IN_COLUMN) {
1376 lq_sta->total_success += info->status.ampdu_ack_len;
1377 lq_sta->total_failed += (info->status.ampdu_len -
1378 info->status.ampdu_ack_len);
1379 }
1380 } else {
1381 /* For legacy, update frame history with for each Tx retry. */
1382 retries = info->status.rates[0].count - 1;
1383 /* HW doesn't send more than 15 retries */
1384 retries = min(retries, 15);
1385
1386 /* The last transmission may have been successful */
1387 legacy_success = !!(info->flags & IEEE80211_TX_STAT_ACK);
1388 /* Collect data for each rate used during failed TX attempts */
1389 for (i = 0; i <= retries; ++i) {
1390 lq_hwrate = le32_to_cpu(table->rs_table[i]);
1391 if (rs_rate_from_ucode_rate(lq_hwrate, info->band,
1392 &lq_rate)) {
1393 WARN_ON_ONCE(1);
1394 return;
1395 }
1396
1397 /*
1398 * Only collect stats if retried rate is in the same RS
1399 * table as active/search.
1400 */
1401 if (rs_rate_column_match(&lq_rate, &curr_tbl->rate))
1402 tmp_tbl = curr_tbl;
1403 else if (rs_rate_column_match(&lq_rate,
1404 &other_tbl->rate))
1405 tmp_tbl = other_tbl;
1406 else
1407 continue;
1408
1409 rs_collect_tpc_data(mvm, lq_sta, tmp_tbl,
1410 tx_resp_rate.index, 1,
1411 i < retries ? 0 : legacy_success,
1412 reduced_txp);
1413 rs_collect_tlc_data(mvm, mvmsta, tid, tmp_tbl,
1414 tx_resp_rate.index, 1,
1415 i < retries ? 0 : legacy_success);
1416 }
1417
1418 /* Update success/fail counts if not searching for new mode */
1419 if (lq_sta->rs_state == RS_STATE_STAY_IN_COLUMN) {
1420 lq_sta->total_success += legacy_success;
1421 lq_sta->total_failed += retries + (1 - legacy_success);
1422 }
1423 }
1424 /* The last TX rate is cached in lq_sta; it's set in if/else above */
1425 lq_sta->last_rate_n_flags = lq_hwrate;
1426 IWL_DEBUG_RATE(mvm, "reduced txpower: %d\n", reduced_txp);
1427done:
1428 /* See if there's a better rate or modulation mode to try. */
1429 if (sta->supp_rates[info->band])
1430 rs_rate_scale_perform(mvm, sta, lq_sta, tid, ndp);
1431}
1432
1433/* 1200/*
1434 * mac80211 sends us Tx status 1201 * mac80211 sends us Tx status
1435 */ 1202 */
@@ -1442,8 +1209,9 @@ static void rs_drv_mac80211_tx_status(void *mvm_r,
1442 struct iwl_op_mode *op_mode = mvm_r; 1209 struct iwl_op_mode *op_mode = mvm_r;
1443 struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode); 1210 struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
1444 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); 1211 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
1212 struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
1445 1213
1446 if (!iwl_mvm_sta_from_mac80211(sta)->vif) 1214 if (!mvmsta->vif)
1447 return; 1215 return;
1448 1216
1449 if (!ieee80211_is_data(hdr->frame_control) || 1217 if (!ieee80211_is_data(hdr->frame_control) ||
@@ -1584,6 +1352,18 @@ static void rs_set_expected_tpt_table(struct iwl_lq_sta *lq_sta,
1584 tbl->expected_tpt = rs_get_expected_tpt_table(lq_sta, column, rate->bw); 1352 tbl->expected_tpt = rs_get_expected_tpt_table(lq_sta, column, rate->bw);
1585} 1353}
1586 1354
1355/* rs uses two tables, one is active and the second is for searching better
1356 * configuration. This function, according to the index of the currently
1357 * active table returns the search table, which is located at the
1358 * index complementary to 1 according to the active table (active = 1,
1359 * search = 0 or active = 0, search = 1).
1360 * Since lq_info is an arary of size 2, make sure index cannot be out of bounds.
1361 */
1362static inline u8 rs_search_tbl(u8 active_tbl)
1363{
1364 return (active_tbl ^ 1) & 1;
1365}
1366
1587static s32 rs_get_best_rate(struct iwl_mvm *mvm, 1367static s32 rs_get_best_rate(struct iwl_mvm *mvm,
1588 struct iwl_lq_sta *lq_sta, 1368 struct iwl_lq_sta *lq_sta,
1589 struct iwl_scale_tbl_info *tbl, /* "search" */ 1369 struct iwl_scale_tbl_info *tbl, /* "search" */
@@ -1794,7 +1574,7 @@ static void rs_update_rate_tbl(struct iwl_mvm *mvm,
1794 struct iwl_scale_tbl_info *tbl) 1574 struct iwl_scale_tbl_info *tbl)
1795{ 1575{
1796 rs_fill_lq_cmd(mvm, sta, lq_sta, &tbl->rate); 1576 rs_fill_lq_cmd(mvm, sta, lq_sta, &tbl->rate);
1797 iwl_mvm_send_lq_cmd(mvm, &lq_sta->lq, false); 1577 iwl_mvm_send_lq_cmd(mvm, &lq_sta->lq);
1798} 1578}
1799 1579
1800static bool rs_tweak_rate_tbl(struct iwl_mvm *mvm, 1580static bool rs_tweak_rate_tbl(struct iwl_mvm *mvm,
@@ -1931,9 +1711,9 @@ static int rs_switch_to_column(struct iwl_mvm *mvm,
1931 struct ieee80211_sta *sta, 1711 struct ieee80211_sta *sta,
1932 enum rs_column col_id) 1712 enum rs_column col_id)
1933{ 1713{
1934 struct iwl_scale_tbl_info *tbl = &(lq_sta->lq_info[lq_sta->active_tbl]); 1714 struct iwl_scale_tbl_info *tbl = &lq_sta->lq_info[lq_sta->active_tbl];
1935 struct iwl_scale_tbl_info *search_tbl = 1715 struct iwl_scale_tbl_info *search_tbl =
1936 &(lq_sta->lq_info[(1 - lq_sta->active_tbl)]); 1716 &lq_sta->lq_info[rs_search_tbl(lq_sta->active_tbl)];
1937 struct rs_rate *rate = &search_tbl->rate; 1717 struct rs_rate *rate = &search_tbl->rate;
1938 const struct rs_tx_column *column = &rs_tx_columns[col_id]; 1718 const struct rs_tx_column *column = &rs_tx_columns[col_id];
1939 const struct rs_tx_column *curr_column = &rs_tx_columns[tbl->column]; 1719 const struct rs_tx_column *curr_column = &rs_tx_columns[tbl->column];
@@ -2341,7 +2121,7 @@ static void rs_rate_scale_perform(struct iwl_mvm *mvm,
2341 if (!lq_sta->search_better_tbl) 2121 if (!lq_sta->search_better_tbl)
2342 active_tbl = lq_sta->active_tbl; 2122 active_tbl = lq_sta->active_tbl;
2343 else 2123 else
2344 active_tbl = 1 - lq_sta->active_tbl; 2124 active_tbl = rs_search_tbl(lq_sta->active_tbl);
2345 2125
2346 tbl = &(lq_sta->lq_info[active_tbl]); 2126 tbl = &(lq_sta->lq_info[active_tbl]);
2347 rate = &tbl->rate; 2127 rate = &tbl->rate;
@@ -2565,7 +2345,7 @@ lq_update:
2565 /* If new "search" mode was selected, set up in uCode table */ 2345 /* If new "search" mode was selected, set up in uCode table */
2566 if (lq_sta->search_better_tbl) { 2346 if (lq_sta->search_better_tbl) {
2567 /* Access the "search" table, clear its history. */ 2347 /* Access the "search" table, clear its history. */
2568 tbl = &(lq_sta->lq_info[(1 - lq_sta->active_tbl)]); 2348 tbl = &lq_sta->lq_info[rs_search_tbl(lq_sta->active_tbl)];
2569 rs_rate_scale_clear_tbl_windows(mvm, tbl); 2349 rs_rate_scale_clear_tbl_windows(mvm, tbl);
2570 2350
2571 /* Use new "search" start rate */ 2351 /* Use new "search" start rate */
@@ -2896,7 +2676,7 @@ void rs_update_last_rssi(struct iwl_mvm *mvm,
2896static void rs_initialize_lq(struct iwl_mvm *mvm, 2676static void rs_initialize_lq(struct iwl_mvm *mvm,
2897 struct ieee80211_sta *sta, 2677 struct ieee80211_sta *sta,
2898 struct iwl_lq_sta *lq_sta, 2678 struct iwl_lq_sta *lq_sta,
2899 enum nl80211_band band, bool update) 2679 enum nl80211_band band)
2900{ 2680{
2901 struct iwl_scale_tbl_info *tbl; 2681 struct iwl_scale_tbl_info *tbl;
2902 struct rs_rate *rate; 2682 struct rs_rate *rate;
@@ -2908,7 +2688,7 @@ static void rs_initialize_lq(struct iwl_mvm *mvm,
2908 if (!lq_sta->search_better_tbl) 2688 if (!lq_sta->search_better_tbl)
2909 active_tbl = lq_sta->active_tbl; 2689 active_tbl = lq_sta->active_tbl;
2910 else 2690 else
2911 active_tbl = 1 - lq_sta->active_tbl; 2691 active_tbl = rs_search_tbl(lq_sta->active_tbl);
2912 2692
2913 tbl = &(lq_sta->lq_info[active_tbl]); 2693 tbl = &(lq_sta->lq_info[active_tbl]);
2914 rate = &tbl->rate; 2694 rate = &tbl->rate;
@@ -2926,7 +2706,7 @@ static void rs_initialize_lq(struct iwl_mvm *mvm,
2926 rs_set_expected_tpt_table(lq_sta, tbl); 2706 rs_set_expected_tpt_table(lq_sta, tbl);
2927 rs_fill_lq_cmd(mvm, sta, lq_sta, rate); 2707 rs_fill_lq_cmd(mvm, sta, lq_sta, rate);
2928 /* TODO restore station should remember the lq cmd */ 2708 /* TODO restore station should remember the lq cmd */
2929 iwl_mvm_send_lq_cmd(mvm, &lq_sta->lq, !update); 2709 iwl_mvm_send_lq_cmd(mvm, &lq_sta->lq);
2930} 2710}
2931 2711
2932static void rs_drv_get_rate(void *mvm_r, struct ieee80211_sta *sta, 2712static void rs_drv_get_rate(void *mvm_r, struct ieee80211_sta *sta,
@@ -3175,7 +2955,7 @@ void iwl_mvm_update_frame_stats(struct iwl_mvm *mvm, u32 rate, bool agg)
3175 * Called after adding a new station to initialize rate scaling 2955 * Called after adding a new station to initialize rate scaling
3176 */ 2956 */
3177static void rs_drv_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta, 2957static void rs_drv_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
3178 enum nl80211_band band, bool update) 2958 enum nl80211_band band)
3179{ 2959{
3180 int i, j; 2960 int i, j;
3181 struct ieee80211_hw *hw = mvm->hw; 2961 struct ieee80211_hw *hw = mvm->hw;
@@ -3186,6 +2966,8 @@ static void rs_drv_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
3186 struct ieee80211_supported_band *sband; 2966 struct ieee80211_supported_band *sband;
3187 unsigned long supp; /* must be unsigned long for for_each_set_bit */ 2967 unsigned long supp; /* must be unsigned long for for_each_set_bit */
3188 2968
2969 lockdep_assert_held(&mvmsta->lq_sta.rs_drv.pers.lock);
2970
3189 /* clear all non-persistent lq data */ 2971 /* clear all non-persistent lq data */
3190 memset(lq_sta, 0, offsetof(typeof(*lq_sta), pers)); 2972 memset(lq_sta, 0, offsetof(typeof(*lq_sta), pers));
3191 2973
@@ -3255,7 +3037,7 @@ static void rs_drv_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
3255#ifdef CONFIG_IWLWIFI_DEBUGFS 3037#ifdef CONFIG_IWLWIFI_DEBUGFS
3256 iwl_mvm_reset_frame_stats(mvm); 3038 iwl_mvm_reset_frame_stats(mvm);
3257#endif 3039#endif
3258 rs_initialize_lq(mvm, sta, lq_sta, band, update); 3040 rs_initialize_lq(mvm, sta, lq_sta, band);
3259} 3041}
3260 3042
3261static void rs_drv_rate_update(void *mvm_r, 3043static void rs_drv_rate_update(void *mvm_r,
@@ -3278,6 +3060,258 @@ static void rs_drv_rate_update(void *mvm_r,
3278 iwl_mvm_rs_rate_init(mvm, sta, sband->band, true); 3060 iwl_mvm_rs_rate_init(mvm, sta, sband->band, true);
3279} 3061}
3280 3062
3063static void __iwl_mvm_rs_tx_status(struct iwl_mvm *mvm,
3064 struct ieee80211_sta *sta,
3065 int tid, struct ieee80211_tx_info *info,
3066 bool ndp)
3067{
3068 int legacy_success;
3069 int retries;
3070 int i;
3071 struct iwl_lq_cmd *table;
3072 u32 lq_hwrate;
3073 struct rs_rate lq_rate, tx_resp_rate;
3074 struct iwl_scale_tbl_info *curr_tbl, *other_tbl, *tmp_tbl;
3075 u32 tlc_info = (uintptr_t)info->status.status_driver_data[0];
3076 u8 reduced_txp = tlc_info & RS_DRV_DATA_TXP_MSK;
3077 u8 lq_color = RS_DRV_DATA_LQ_COLOR_GET(tlc_info);
3078 u32 tx_resp_hwrate = (uintptr_t)info->status.status_driver_data[1];
3079 struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
3080 struct iwl_lq_sta *lq_sta = &mvmsta->lq_sta.rs_drv;
3081
3082 /* Treat uninitialized rate scaling data same as non-existing. */
3083 if (!lq_sta) {
3084 IWL_DEBUG_RATE(mvm, "Station rate scaling not created yet.\n");
3085 return;
3086 } else if (!lq_sta->pers.drv) {
3087 IWL_DEBUG_RATE(mvm, "Rate scaling not initialized yet.\n");
3088 return;
3089 }
3090
3091 /* This packet was aggregated but doesn't carry status info */
3092 if ((info->flags & IEEE80211_TX_CTL_AMPDU) &&
3093 !(info->flags & IEEE80211_TX_STAT_AMPDU))
3094 return;
3095
3096 if (rs_rate_from_ucode_rate(tx_resp_hwrate, info->band,
3097 &tx_resp_rate)) {
3098 WARN_ON_ONCE(1);
3099 return;
3100 }
3101
3102#ifdef CONFIG_MAC80211_DEBUGFS
3103 /* Disable last tx check if we are debugging with fixed rate but
3104 * update tx stats
3105 */
3106 if (lq_sta->pers.dbg_fixed_rate) {
3107 int index = tx_resp_rate.index;
3108 enum rs_column column;
3109 int attempts, success;
3110
3111 column = rs_get_column_from_rate(&tx_resp_rate);
3112 if (WARN_ONCE(column == RS_COLUMN_INVALID,
3113 "Can't map rate 0x%x to column",
3114 tx_resp_hwrate))
3115 return;
3116
3117 if (info->flags & IEEE80211_TX_STAT_AMPDU) {
3118 attempts = info->status.ampdu_len;
3119 success = info->status.ampdu_ack_len;
3120 } else {
3121 attempts = info->status.rates[0].count;
3122 success = !!(info->flags & IEEE80211_TX_STAT_ACK);
3123 }
3124
3125 lq_sta->pers.tx_stats[column][index].total += attempts;
3126 lq_sta->pers.tx_stats[column][index].success += success;
3127
3128 IWL_DEBUG_RATE(mvm, "Fixed rate 0x%x success %d attempts %d\n",
3129 tx_resp_hwrate, success, attempts);
3130 return;
3131 }
3132#endif
3133
3134 if (time_after(jiffies,
3135 (unsigned long)(lq_sta->last_tx +
3136 (IWL_MVM_RS_IDLE_TIMEOUT * HZ)))) {
3137 IWL_DEBUG_RATE(mvm, "Tx idle for too long. reinit rs\n");
3138 /* reach here only in case of driver RS, call directly
3139 * the unlocked version
3140 */
3141 rs_drv_rate_init(mvm, sta, info->band);
3142 return;
3143 }
3144 lq_sta->last_tx = jiffies;
3145
3146 /* Ignore this Tx frame response if its initial rate doesn't match
3147 * that of latest Link Quality command. There may be stragglers
3148 * from a previous Link Quality command, but we're no longer interested
3149 * in those; they're either from the "active" mode while we're trying
3150 * to check "search" mode, or a prior "search" mode after we've moved
3151 * to a new "search" mode (which might become the new "active" mode).
3152 */
3153 table = &lq_sta->lq;
3154 lq_hwrate = le32_to_cpu(table->rs_table[0]);
3155 if (rs_rate_from_ucode_rate(lq_hwrate, info->band, &lq_rate)) {
3156 WARN_ON_ONCE(1);
3157 return;
3158 }
3159
3160 /* Here we actually compare this rate to the latest LQ command */
3161 if (lq_color != LQ_FLAG_COLOR_GET(table->flags)) {
3162 IWL_DEBUG_RATE(mvm,
3163 "tx resp color 0x%x does not match 0x%x\n",
3164 lq_color, LQ_FLAG_COLOR_GET(table->flags));
3165
3166 /* Since rates mis-match, the last LQ command may have failed.
3167 * After IWL_MISSED_RATE_MAX mis-matches, resync the uCode with
3168 * ... driver.
3169 */
3170 lq_sta->missed_rate_counter++;
3171 if (lq_sta->missed_rate_counter > IWL_MVM_RS_MISSED_RATE_MAX) {
3172 lq_sta->missed_rate_counter = 0;
3173 IWL_DEBUG_RATE(mvm,
3174 "Too many rates mismatch. Send sync LQ. rs_state %d\n",
3175 lq_sta->rs_state);
3176 iwl_mvm_send_lq_cmd(mvm, &lq_sta->lq);
3177 }
3178 /* Regardless, ignore this status info for outdated rate */
3179 return;
3180 }
3181
3182 /* Rate did match, so reset the missed_rate_counter */
3183 lq_sta->missed_rate_counter = 0;
3184
3185 if (!lq_sta->search_better_tbl) {
3186 curr_tbl = &lq_sta->lq_info[lq_sta->active_tbl];
3187 other_tbl = &lq_sta->lq_info[rs_search_tbl(lq_sta->active_tbl)];
3188 } else {
3189 curr_tbl = &lq_sta->lq_info[rs_search_tbl(lq_sta->active_tbl)];
3190 other_tbl = &lq_sta->lq_info[lq_sta->active_tbl];
3191 }
3192
3193 if (WARN_ON_ONCE(!rs_rate_column_match(&lq_rate, &curr_tbl->rate))) {
3194 IWL_DEBUG_RATE(mvm,
3195 "Neither active nor search matches tx rate\n");
3196 tmp_tbl = &lq_sta->lq_info[lq_sta->active_tbl];
3197 rs_dump_rate(mvm, &tmp_tbl->rate, "ACTIVE");
3198 tmp_tbl = &lq_sta->lq_info[rs_search_tbl(lq_sta->active_tbl)];
3199 rs_dump_rate(mvm, &tmp_tbl->rate, "SEARCH");
3200 rs_dump_rate(mvm, &lq_rate, "ACTUAL");
3201
3202 /* no matching table found, let's by-pass the data collection
3203 * and continue to perform rate scale to find the rate table
3204 */
3205 rs_stay_in_table(lq_sta, true);
3206 goto done;
3207 }
3208
3209 /* Updating the frame history depends on whether packets were
3210 * aggregated.
3211 *
3212 * For aggregation, all packets were transmitted at the same rate, the
3213 * first index into rate scale table.
3214 */
3215 if (info->flags & IEEE80211_TX_STAT_AMPDU) {
3216 rs_collect_tpc_data(mvm, lq_sta, curr_tbl, tx_resp_rate.index,
3217 info->status.ampdu_len,
3218 info->status.ampdu_ack_len,
3219 reduced_txp);
3220
3221 /* ampdu_ack_len = 0 marks no BA was received. For TLC, treat
3222 * it as a single frame loss as we don't want the success ratio
3223 * to dip too quickly because a BA wasn't received.
3224 * For TPC, there's no need for this optimisation since we want
3225 * to recover very quickly from a bad power reduction and,
3226 * therefore we'd like the success ratio to get an immediate hit
3227 * when failing to get a BA, so we'd switch back to a lower or
3228 * zero power reduction. When FW transmits agg with a rate
3229 * different from the initial rate, it will not use reduced txp
3230 * and will send BA notification twice (one empty with reduced
3231 * txp equal to the value from LQ and one with reduced txp 0).
3232 * We need to update counters for each txp level accordingly.
3233 */
3234 if (info->status.ampdu_ack_len == 0)
3235 info->status.ampdu_len = 1;
3236
3237 rs_collect_tlc_data(mvm, mvmsta, tid, curr_tbl,
3238 tx_resp_rate.index,
3239 info->status.ampdu_len,
3240 info->status.ampdu_ack_len);
3241
3242 /* Update success/fail counts if not searching for new mode */
3243 if (lq_sta->rs_state == RS_STATE_STAY_IN_COLUMN) {
3244 lq_sta->total_success += info->status.ampdu_ack_len;
3245 lq_sta->total_failed += (info->status.ampdu_len -
3246 info->status.ampdu_ack_len);
3247 }
3248 } else {
3249 /* For legacy, update frame history with for each Tx retry. */
3250 retries = info->status.rates[0].count - 1;
3251 /* HW doesn't send more than 15 retries */
3252 retries = min(retries, 15);
3253
3254 /* The last transmission may have been successful */
3255 legacy_success = !!(info->flags & IEEE80211_TX_STAT_ACK);
3256 /* Collect data for each rate used during failed TX attempts */
3257 for (i = 0; i <= retries; ++i) {
3258 lq_hwrate = le32_to_cpu(table->rs_table[i]);
3259 if (rs_rate_from_ucode_rate(lq_hwrate, info->band,
3260 &lq_rate)) {
3261 WARN_ON_ONCE(1);
3262 return;
3263 }
3264
3265 /* Only collect stats if retried rate is in the same RS
3266 * table as active/search.
3267 */
3268 if (rs_rate_column_match(&lq_rate, &curr_tbl->rate))
3269 tmp_tbl = curr_tbl;
3270 else if (rs_rate_column_match(&lq_rate,
3271 &other_tbl->rate))
3272 tmp_tbl = other_tbl;
3273 else
3274 continue;
3275
3276 rs_collect_tpc_data(mvm, lq_sta, tmp_tbl,
3277 tx_resp_rate.index, 1,
3278 i < retries ? 0 : legacy_success,
3279 reduced_txp);
3280 rs_collect_tlc_data(mvm, mvmsta, tid, tmp_tbl,
3281 tx_resp_rate.index, 1,
3282 i < retries ? 0 : legacy_success);
3283 }
3284
3285 /* Update success/fail counts if not searching for new mode */
3286 if (lq_sta->rs_state == RS_STATE_STAY_IN_COLUMN) {
3287 lq_sta->total_success += legacy_success;
3288 lq_sta->total_failed += retries + (1 - legacy_success);
3289 }
3290 }
3291 /* The last TX rate is cached in lq_sta; it's set in if/else above */
3292 lq_sta->last_rate_n_flags = lq_hwrate;
3293 IWL_DEBUG_RATE(mvm, "reduced txpower: %d\n", reduced_txp);
3294done:
3295 /* See if there's a better rate or modulation mode to try. */
3296 if (sta->supp_rates[info->band])
3297 rs_rate_scale_perform(mvm, sta, lq_sta, tid, ndp);
3298}
3299
3300void iwl_mvm_rs_tx_status(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
3301 int tid, struct ieee80211_tx_info *info, bool ndp)
3302{
3303 struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
3304
3305 /* If it's locked we are in middle of init flow
3306 * just wait for next tx status to update the lq_sta data
3307 */
3308 if (!spin_trylock(&mvmsta->lq_sta.rs_drv.pers.lock))
3309 return;
3310
3311 __iwl_mvm_rs_tx_status(mvm, sta, tid, info, ndp);
3312 spin_unlock(&mvmsta->lq_sta.rs_drv.pers.lock);
3313}
3314
3281#ifdef CONFIG_MAC80211_DEBUGFS 3315#ifdef CONFIG_MAC80211_DEBUGFS
3282static void rs_build_rates_table_from_fixed(struct iwl_mvm *mvm, 3316static void rs_build_rates_table_from_fixed(struct iwl_mvm *mvm,
3283 struct iwl_lq_cmd *lq_cmd, 3317 struct iwl_lq_cmd *lq_cmd,
@@ -3569,7 +3603,7 @@ static void rs_set_lq_ss_params(struct iwl_mvm *mvm,
3569 3603
3570 bfersta_ss_params &= ~LQ_SS_BFER_ALLOWED; 3604 bfersta_ss_params &= ~LQ_SS_BFER_ALLOWED;
3571 bfersta_lq_cmd->ss_params = cpu_to_le32(bfersta_ss_params); 3605 bfersta_lq_cmd->ss_params = cpu_to_le32(bfersta_ss_params);
3572 iwl_mvm_send_lq_cmd(mvm, bfersta_lq_cmd, false); 3606 iwl_mvm_send_lq_cmd(mvm, bfersta_lq_cmd);
3573 3607
3574 ss_params |= LQ_SS_BFER_ALLOWED; 3608 ss_params |= LQ_SS_BFER_ALLOWED;
3575 IWL_DEBUG_RATE(mvm, 3609 IWL_DEBUG_RATE(mvm,
@@ -3735,7 +3769,7 @@ static void rs_program_fix_rate(struct iwl_mvm *mvm,
3735 3769
3736 if (lq_sta->pers.dbg_fixed_rate) { 3770 if (lq_sta->pers.dbg_fixed_rate) {
3737 rs_fill_lq_cmd(mvm, NULL, lq_sta, NULL); 3771 rs_fill_lq_cmd(mvm, NULL, lq_sta, NULL);
3738 iwl_mvm_send_lq_cmd(lq_sta->pers.drv, &lq_sta->lq, false); 3772 iwl_mvm_send_lq_cmd(lq_sta->pers.drv, &lq_sta->lq);
3739 } 3773 }
3740} 3774}
3741 3775
@@ -4132,10 +4166,15 @@ static const struct rate_control_ops rs_mvm_ops_drv = {
4132void iwl_mvm_rs_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta, 4166void iwl_mvm_rs_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
4133 enum nl80211_band band, bool update) 4167 enum nl80211_band band, bool update)
4134{ 4168{
4135 if (iwl_mvm_has_tlc_offload(mvm)) 4169 if (iwl_mvm_has_tlc_offload(mvm)) {
4136 rs_fw_rate_init(mvm, sta, band, update); 4170 rs_fw_rate_init(mvm, sta, band, update);
4137 else 4171 } else {
4138 rs_drv_rate_init(mvm, sta, band, update); 4172 struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
4173
4174 spin_lock(&mvmsta->lq_sta.rs_drv.pers.lock);
4175 rs_drv_rate_init(mvm, sta, band);
4176 spin_unlock(&mvmsta->lq_sta.rs_drv.pers.lock);
4177 }
4139} 4178}
4140 4179
4141int iwl_mvm_rate_control_register(void) 4180int iwl_mvm_rate_control_register(void)
@@ -4165,7 +4204,7 @@ static int rs_drv_tx_protection(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta,
4165 lq->flags &= ~LQ_FLAG_USE_RTS_MSK; 4204 lq->flags &= ~LQ_FLAG_USE_RTS_MSK;
4166 } 4205 }
4167 4206
4168 return iwl_mvm_send_lq_cmd(mvm, lq, false); 4207 return iwl_mvm_send_lq_cmd(mvm, lq);
4169} 4208}
4170 4209
4171/** 4210/**
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rs.h b/drivers/net/wireless/intel/iwlwifi/mvm/rs.h
index f7eb60dbaf20..428642e66658 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/rs.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/rs.h
@@ -4,7 +4,7 @@
4 * Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved. 4 * Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved.
5 * Copyright(c) 2015 Intel Mobile Communications GmbH 5 * Copyright(c) 2015 Intel Mobile Communications GmbH
6 * Copyright(c) 2017 Intel Deutschland GmbH 6 * Copyright(c) 2017 Intel Deutschland GmbH
7 * Copyright(c) 2018 Intel Corporation 7 * Copyright(c) 2018 - 2019 Intel Corporation
8 * 8 *
9 * Contact Information: 9 * Contact Information:
10 * Intel Linux Wireless <linuxwifi@intel.com> 10 * Intel Linux Wireless <linuxwifi@intel.com>
@@ -390,6 +390,7 @@ struct iwl_lq_sta {
390 s8 last_rssi; 390 s8 last_rssi;
391 struct rs_rate_stats tx_stats[RS_COLUMN_COUNT][IWL_RATE_COUNT]; 391 struct rs_rate_stats tx_stats[RS_COLUMN_COUNT][IWL_RATE_COUNT];
392 struct iwl_mvm *drv; 392 struct iwl_mvm *drv;
393 spinlock_t lock; /* for races in reinit/update table */
393 } pers; 394 } pers;
394}; 395};
395 396
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
index 64f950501287..854edd7d7103 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
@@ -463,20 +463,22 @@ static bool iwl_mvm_is_dup(struct ieee80211_sta *sta, int queue,
463} 463}
464 464
465int iwl_mvm_notify_rx_queue(struct iwl_mvm *mvm, u32 rxq_mask, 465int iwl_mvm_notify_rx_queue(struct iwl_mvm *mvm, u32 rxq_mask,
466 const u8 *data, u32 count) 466 const u8 *data, u32 count, bool async)
467{ 467{
468 struct iwl_rxq_sync_cmd *cmd; 468 u8 buf[sizeof(struct iwl_rxq_sync_cmd) +
469 sizeof(struct iwl_mvm_rss_sync_notif)];
470 struct iwl_rxq_sync_cmd *cmd = (void *)buf;
469 u32 data_size = sizeof(*cmd) + count; 471 u32 data_size = sizeof(*cmd) + count;
470 int ret; 472 int ret;
471 473
472 /* should be DWORD aligned */ 474 /*
473 if (WARN_ON(count & 3 || count > IWL_MULTI_QUEUE_SYNC_MSG_MAX_SIZE)) 475 * size must be a multiple of DWORD
476 * Ensure we don't overflow buf
477 */
478 if (WARN_ON(count & 3 ||
479 count > sizeof(struct iwl_mvm_rss_sync_notif)))
474 return -EINVAL; 480 return -EINVAL;
475 481
476 cmd = kzalloc(data_size, GFP_KERNEL);
477 if (!cmd)
478 return -ENOMEM;
479
480 cmd->rxq_mask = cpu_to_le32(rxq_mask); 482 cmd->rxq_mask = cpu_to_le32(rxq_mask);
481 cmd->count = cpu_to_le32(count); 483 cmd->count = cpu_to_le32(count);
482 cmd->flags = 0; 484 cmd->flags = 0;
@@ -485,9 +487,8 @@ int iwl_mvm_notify_rx_queue(struct iwl_mvm *mvm, u32 rxq_mask,
485 ret = iwl_mvm_send_cmd_pdu(mvm, 487 ret = iwl_mvm_send_cmd_pdu(mvm,
486 WIDE_ID(DATA_PATH_GROUP, 488 WIDE_ID(DATA_PATH_GROUP,
487 TRIGGER_RX_QUEUES_NOTIF_CMD), 489 TRIGGER_RX_QUEUES_NOTIF_CMD),
488 0, data_size, cmd); 490 async ? CMD_ASYNC : 0, data_size, cmd);
489 491
490 kfree(cmd);
491 return ret; 492 return ret;
492} 493}
493 494
@@ -503,14 +504,31 @@ static bool iwl_mvm_is_sn_less(u16 sn1, u16 sn2, u16 buffer_size)
503 !ieee80211_sn_less(sn1, sn2 - buffer_size); 504 !ieee80211_sn_less(sn1, sn2 - buffer_size);
504} 505}
505 506
507static void iwl_mvm_sync_nssn(struct iwl_mvm *mvm, u8 baid, u16 nssn)
508{
509 struct iwl_mvm_rss_sync_notif notif = {
510 .metadata.type = IWL_MVM_RXQ_NSSN_SYNC,
511 .metadata.sync = 0,
512 .nssn_sync.baid = baid,
513 .nssn_sync.nssn = nssn,
514 };
515
516 iwl_mvm_sync_rx_queues_internal(mvm, (void *)&notif, sizeof(notif));
517}
518
506#define RX_REORDER_BUF_TIMEOUT_MQ (HZ / 10) 519#define RX_REORDER_BUF_TIMEOUT_MQ (HZ / 10)
507 520
521enum iwl_mvm_release_flags {
522 IWL_MVM_RELEASE_SEND_RSS_SYNC = BIT(0),
523 IWL_MVM_RELEASE_FROM_RSS_SYNC = BIT(1),
524};
525
508static void iwl_mvm_release_frames(struct iwl_mvm *mvm, 526static void iwl_mvm_release_frames(struct iwl_mvm *mvm,
509 struct ieee80211_sta *sta, 527 struct ieee80211_sta *sta,
510 struct napi_struct *napi, 528 struct napi_struct *napi,
511 struct iwl_mvm_baid_data *baid_data, 529 struct iwl_mvm_baid_data *baid_data,
512 struct iwl_mvm_reorder_buffer *reorder_buf, 530 struct iwl_mvm_reorder_buffer *reorder_buf,
513 u16 nssn) 531 u16 nssn, u32 flags)
514{ 532{
515 struct iwl_mvm_reorder_buf_entry *entries = 533 struct iwl_mvm_reorder_buf_entry *entries =
516 &baid_data->entries[reorder_buf->queue * 534 &baid_data->entries[reorder_buf->queue *
@@ -519,6 +537,18 @@ static void iwl_mvm_release_frames(struct iwl_mvm *mvm,
519 537
520 lockdep_assert_held(&reorder_buf->lock); 538 lockdep_assert_held(&reorder_buf->lock);
521 539
540 /*
541 * We keep the NSSN not too far behind, if we are sync'ing it and it
542 * is more than 2048 ahead of us, it must be behind us. Discard it.
543 * This can happen if the queue that hit the 0 / 2048 seqno was lagging
544 * behind and this queue already processed packets. The next if
545 * would have caught cases where this queue would have processed less
546 * than 64 packets, but it may have processed more than 64 packets.
547 */
548 if ((flags & IWL_MVM_RELEASE_FROM_RSS_SYNC) &&
549 ieee80211_sn_less(nssn, ssn))
550 goto set_timer;
551
522 /* ignore nssn smaller than head sn - this can happen due to timeout */ 552 /* ignore nssn smaller than head sn - this can happen due to timeout */
523 if (iwl_mvm_is_sn_less(nssn, ssn, reorder_buf->buf_size)) 553 if (iwl_mvm_is_sn_less(nssn, ssn, reorder_buf->buf_size))
524 goto set_timer; 554 goto set_timer;
@@ -529,6 +559,9 @@ static void iwl_mvm_release_frames(struct iwl_mvm *mvm,
529 struct sk_buff *skb; 559 struct sk_buff *skb;
530 560
531 ssn = ieee80211_sn_inc(ssn); 561 ssn = ieee80211_sn_inc(ssn);
562 if ((flags & IWL_MVM_RELEASE_SEND_RSS_SYNC) &&
563 (ssn == 2048 || ssn == 0))
564 iwl_mvm_sync_nssn(mvm, baid_data->baid, ssn);
532 565
533 /* 566 /*
534 * Empty the list. Will have more than one frame for A-MSDU. 567 * Empty the list. Will have more than one frame for A-MSDU.
@@ -615,7 +648,8 @@ void iwl_mvm_reorder_timer_expired(struct timer_list *t)
615 sta_id, sn); 648 sta_id, sn);
616 iwl_mvm_event_frame_timeout_callback(buf->mvm, mvmsta->vif, 649 iwl_mvm_event_frame_timeout_callback(buf->mvm, mvmsta->vif,
617 sta, baid_data->tid); 650 sta, baid_data->tid);
618 iwl_mvm_release_frames(buf->mvm, sta, NULL, baid_data, buf, sn); 651 iwl_mvm_release_frames(buf->mvm, sta, NULL, baid_data,
652 buf, sn, IWL_MVM_RELEASE_SEND_RSS_SYNC);
619 rcu_read_unlock(); 653 rcu_read_unlock();
620 } else { 654 } else {
621 /* 655 /*
@@ -657,7 +691,8 @@ static void iwl_mvm_del_ba(struct iwl_mvm *mvm, int queue,
657 spin_lock_bh(&reorder_buf->lock); 691 spin_lock_bh(&reorder_buf->lock);
658 iwl_mvm_release_frames(mvm, sta, NULL, ba_data, reorder_buf, 692 iwl_mvm_release_frames(mvm, sta, NULL, ba_data, reorder_buf,
659 ieee80211_sn_add(reorder_buf->head_sn, 693 ieee80211_sn_add(reorder_buf->head_sn,
660 reorder_buf->buf_size)); 694 reorder_buf->buf_size),
695 0);
661 spin_unlock_bh(&reorder_buf->lock); 696 spin_unlock_bh(&reorder_buf->lock);
662 del_timer_sync(&reorder_buf->reorder_timer); 697 del_timer_sync(&reorder_buf->reorder_timer);
663 698
@@ -665,8 +700,54 @@ out:
665 rcu_read_unlock(); 700 rcu_read_unlock();
666} 701}
667 702
668void iwl_mvm_rx_queue_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb, 703static void iwl_mvm_release_frames_from_notif(struct iwl_mvm *mvm,
669 int queue) 704 struct napi_struct *napi,
705 u8 baid, u16 nssn, int queue,
706 u32 flags)
707{
708 struct ieee80211_sta *sta;
709 struct iwl_mvm_reorder_buffer *reorder_buf;
710 struct iwl_mvm_baid_data *ba_data;
711
712 IWL_DEBUG_HT(mvm, "Frame release notification for BAID %u, NSSN %d\n",
713 baid, nssn);
714
715 if (WARN_ON_ONCE(baid == IWL_RX_REORDER_DATA_INVALID_BAID ||
716 baid >= ARRAY_SIZE(mvm->baid_map)))
717 return;
718
719 rcu_read_lock();
720
721 ba_data = rcu_dereference(mvm->baid_map[baid]);
722 if (WARN_ON_ONCE(!ba_data))
723 goto out;
724
725 sta = rcu_dereference(mvm->fw_id_to_mac_id[ba_data->sta_id]);
726 if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta)))
727 goto out;
728
729 reorder_buf = &ba_data->reorder_buf[queue];
730
731 spin_lock_bh(&reorder_buf->lock);
732 iwl_mvm_release_frames(mvm, sta, napi, ba_data,
733 reorder_buf, nssn, flags);
734 spin_unlock_bh(&reorder_buf->lock);
735
736out:
737 rcu_read_unlock();
738}
739
740static void iwl_mvm_nssn_sync(struct iwl_mvm *mvm,
741 struct napi_struct *napi, int queue,
742 const struct iwl_mvm_nssn_sync_data *data)
743{
744 iwl_mvm_release_frames_from_notif(mvm, napi, data->baid,
745 data->nssn, queue,
746 IWL_MVM_RELEASE_FROM_RSS_SYNC);
747}
748
749void iwl_mvm_rx_queue_notif(struct iwl_mvm *mvm, struct napi_struct *napi,
750 struct iwl_rx_cmd_buffer *rxb, int queue)
670{ 751{
671 struct iwl_rx_packet *pkt = rxb_addr(rxb); 752 struct iwl_rx_packet *pkt = rxb_addr(rxb);
672 struct iwl_rxq_sync_notification *notif; 753 struct iwl_rxq_sync_notification *notif;
@@ -687,6 +768,10 @@ void iwl_mvm_rx_queue_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
687 case IWL_MVM_RXQ_NOTIF_DEL_BA: 768 case IWL_MVM_RXQ_NOTIF_DEL_BA:
688 iwl_mvm_del_ba(mvm, queue, (void *)internal_notif->data); 769 iwl_mvm_del_ba(mvm, queue, (void *)internal_notif->data);
689 break; 770 break;
771 case IWL_MVM_RXQ_NSSN_SYNC:
772 iwl_mvm_nssn_sync(mvm, napi, queue,
773 (void *)internal_notif->data);
774 break;
690 default: 775 default:
691 WARN_ONCE(1, "Invalid identifier %d", internal_notif->type); 776 WARN_ONCE(1, "Invalid identifier %d", internal_notif->type);
692 } 777 }
@@ -785,7 +870,8 @@ static bool iwl_mvm_reorder(struct iwl_mvm *mvm,
785 } 870 }
786 871
787 if (ieee80211_is_back_req(hdr->frame_control)) { 872 if (ieee80211_is_back_req(hdr->frame_control)) {
788 iwl_mvm_release_frames(mvm, sta, napi, baid_data, buffer, nssn); 873 iwl_mvm_release_frames(mvm, sta, napi, baid_data,
874 buffer, nssn, 0);
789 goto drop; 875 goto drop;
790 } 876 }
791 877
@@ -794,7 +880,10 @@ static bool iwl_mvm_reorder(struct iwl_mvm *mvm,
794 * If the SN is smaller than the NSSN it might need to first go into 880 * If the SN is smaller than the NSSN it might need to first go into
795 * the reorder buffer, in which case we just release up to it and the 881 * the reorder buffer, in which case we just release up to it and the
796 * rest of the function will take care of storing it and releasing up to 882 * rest of the function will take care of storing it and releasing up to
797 * the nssn 883 * the nssn.
884 * This should not happen. This queue has been lagging and it should
885 * have been updated by a IWL_MVM_RXQ_NSSN_SYNC notification. Be nice
886 * and update the other queues.
798 */ 887 */
799 if (!iwl_mvm_is_sn_less(nssn, buffer->head_sn + buffer->buf_size, 888 if (!iwl_mvm_is_sn_less(nssn, buffer->head_sn + buffer->buf_size,
800 buffer->buf_size) || 889 buffer->buf_size) ||
@@ -802,7 +891,7 @@ static bool iwl_mvm_reorder(struct iwl_mvm *mvm,
802 u16 min_sn = ieee80211_sn_less(sn, nssn) ? sn : nssn; 891 u16 min_sn = ieee80211_sn_less(sn, nssn) ? sn : nssn;
803 892
804 iwl_mvm_release_frames(mvm, sta, napi, baid_data, buffer, 893 iwl_mvm_release_frames(mvm, sta, napi, baid_data, buffer,
805 min_sn); 894 min_sn, IWL_MVM_RELEASE_SEND_RSS_SYNC);
806 } 895 }
807 896
808 /* drop any oudated packets */ 897 /* drop any oudated packets */
@@ -813,8 +902,23 @@ static bool iwl_mvm_reorder(struct iwl_mvm *mvm,
813 if (!buffer->num_stored && ieee80211_sn_less(sn, nssn)) { 902 if (!buffer->num_stored && ieee80211_sn_less(sn, nssn)) {
814 if (iwl_mvm_is_sn_less(buffer->head_sn, nssn, 903 if (iwl_mvm_is_sn_less(buffer->head_sn, nssn,
815 buffer->buf_size) && 904 buffer->buf_size) &&
816 (!amsdu || last_subframe)) 905 (!amsdu || last_subframe)) {
906 /*
907 * If we crossed the 2048 or 0 SN, notify all the
908 * queues. This is done in order to avoid having a
909 * head_sn that lags behind for too long. When that
910 * happens, we can get to a situation where the head_sn
911 * is within the interval [nssn - buf_size : nssn]
912 * which will make us think that the nssn is a packet
913 * that we already freed because of the reordering
914 * buffer and we will ignore it. So maintain the
915 * head_sn somewhat updated across all the queues:
916 * when it crosses 0 and 2048.
917 */
918 if (sn == 2048 || sn == 0)
919 iwl_mvm_sync_nssn(mvm, baid, sn);
817 buffer->head_sn = nssn; 920 buffer->head_sn = nssn;
921 }
818 /* No need to update AMSDU last SN - we are moving the head */ 922 /* No need to update AMSDU last SN - we are moving the head */
819 spin_unlock_bh(&buffer->lock); 923 spin_unlock_bh(&buffer->lock);
820 return false; 924 return false;
@@ -829,8 +933,11 @@ static bool iwl_mvm_reorder(struct iwl_mvm *mvm,
829 * while technically there is no hole and we can move forward. 933 * while technically there is no hole and we can move forward.
830 */ 934 */
831 if (!buffer->num_stored && sn == buffer->head_sn) { 935 if (!buffer->num_stored && sn == buffer->head_sn) {
832 if (!amsdu || last_subframe) 936 if (!amsdu || last_subframe) {
937 if (sn == 2048 || sn == 0)
938 iwl_mvm_sync_nssn(mvm, baid, sn);
833 buffer->head_sn = ieee80211_sn_inc(buffer->head_sn); 939 buffer->head_sn = ieee80211_sn_inc(buffer->head_sn);
940 }
834 /* No need to update AMSDU last SN - we are moving the head */ 941 /* No need to update AMSDU last SN - we are moving the head */
835 spin_unlock_bh(&buffer->lock); 942 spin_unlock_bh(&buffer->lock);
836 return false; 943 return false;
@@ -875,7 +982,9 @@ static bool iwl_mvm_reorder(struct iwl_mvm *mvm,
875 * release notification with up to date NSSN. 982 * release notification with up to date NSSN.
876 */ 983 */
877 if (!amsdu || last_subframe) 984 if (!amsdu || last_subframe)
878 iwl_mvm_release_frames(mvm, sta, napi, baid_data, buffer, nssn); 985 iwl_mvm_release_frames(mvm, sta, napi, baid_data,
986 buffer, nssn,
987 IWL_MVM_RELEASE_SEND_RSS_SYNC);
879 988
880 spin_unlock_bh(&buffer->lock); 989 spin_unlock_bh(&buffer->lock);
881 return true; 990 return true;
@@ -1840,40 +1949,14 @@ void iwl_mvm_rx_monitor_no_data(struct iwl_mvm *mvm, struct napi_struct *napi,
1840out: 1949out:
1841 rcu_read_unlock(); 1950 rcu_read_unlock();
1842} 1951}
1952
1843void iwl_mvm_rx_frame_release(struct iwl_mvm *mvm, struct napi_struct *napi, 1953void iwl_mvm_rx_frame_release(struct iwl_mvm *mvm, struct napi_struct *napi,
1844 struct iwl_rx_cmd_buffer *rxb, int queue) 1954 struct iwl_rx_cmd_buffer *rxb, int queue)
1845{ 1955{
1846 struct iwl_rx_packet *pkt = rxb_addr(rxb); 1956 struct iwl_rx_packet *pkt = rxb_addr(rxb);
1847 struct iwl_frame_release *release = (void *)pkt->data; 1957 struct iwl_frame_release *release = (void *)pkt->data;
1848 struct ieee80211_sta *sta;
1849 struct iwl_mvm_reorder_buffer *reorder_buf;
1850 struct iwl_mvm_baid_data *ba_data;
1851
1852 int baid = release->baid;
1853
1854 IWL_DEBUG_HT(mvm, "Frame release notification for BAID %u, NSSN %d\n",
1855 release->baid, le16_to_cpu(release->nssn));
1856 1958
1857 if (WARN_ON_ONCE(baid == IWL_RX_REORDER_DATA_INVALID_BAID)) 1959 iwl_mvm_release_frames_from_notif(mvm, napi, release->baid,
1858 return; 1960 le16_to_cpu(release->nssn),
1859 1961 queue, 0);
1860 rcu_read_lock();
1861
1862 ba_data = rcu_dereference(mvm->baid_map[baid]);
1863 if (WARN_ON_ONCE(!ba_data))
1864 goto out;
1865
1866 sta = rcu_dereference(mvm->fw_id_to_mac_id[ba_data->sta_id]);
1867 if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta)))
1868 goto out;
1869
1870 reorder_buf = &ba_data->reorder_buf[queue];
1871
1872 spin_lock_bh(&reorder_buf->lock);
1873 iwl_mvm_release_frames(mvm, sta, napi, ba_data, reorder_buf,
1874 le16_to_cpu(release->nssn));
1875 spin_unlock_bh(&reorder_buf->lock);
1876
1877out:
1878 rcu_read_unlock();
1879} 1962}
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
index f545a737a92d..10f18536dd0d 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
@@ -1684,6 +1684,8 @@ int iwl_mvm_add_sta(struct iwl_mvm *mvm,
1684 */ 1684 */
1685 if (iwl_mvm_has_tlc_offload(mvm)) 1685 if (iwl_mvm_has_tlc_offload(mvm))
1686 iwl_mvm_rs_add_sta(mvm, mvm_sta); 1686 iwl_mvm_rs_add_sta(mvm, mvm_sta);
1687 else
1688 spin_lock_init(&mvm_sta->lq_sta.rs_drv.pers.lock);
1687 1689
1688 iwl_mvm_toggle_tx_ant(mvm, &mvm_sta->tx_ant); 1690 iwl_mvm_toggle_tx_ant(mvm, &mvm_sta->tx_ant);
1689 1691
@@ -2421,7 +2423,7 @@ int iwl_mvm_rm_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
2421 2423
2422static void iwl_mvm_sync_rxq_del_ba(struct iwl_mvm *mvm, u8 baid) 2424static void iwl_mvm_sync_rxq_del_ba(struct iwl_mvm *mvm, u8 baid)
2423{ 2425{
2424 struct iwl_mvm_delba_notif notif = { 2426 struct iwl_mvm_rss_sync_notif notif = {
2425 .metadata.type = IWL_MVM_RXQ_NOTIF_DEL_BA, 2427 .metadata.type = IWL_MVM_RXQ_NOTIF_DEL_BA,
2426 .metadata.sync = 1, 2428 .metadata.sync = 1,
2427 .delba.baid = baid, 2429 .delba.baid = baid,
@@ -2972,7 +2974,7 @@ out:
2972 IWL_DEBUG_HT(mvm, "Tx aggregation enabled on ra = %pM tid = %d\n", 2974 IWL_DEBUG_HT(mvm, "Tx aggregation enabled on ra = %pM tid = %d\n",
2973 sta->addr, tid); 2975 sta->addr, tid);
2974 2976
2975 return iwl_mvm_send_lq_cmd(mvm, &mvmsta->lq_sta.rs_drv.lq, false); 2977 return iwl_mvm_send_lq_cmd(mvm, &mvmsta->lq_sta.rs_drv.lq);
2976} 2978}
2977 2979
2978static void iwl_mvm_unreserve_agg_queue(struct iwl_mvm *mvm, 2980static void iwl_mvm_unreserve_agg_queue(struct iwl_mvm *mvm,
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.h b/drivers/net/wireless/intel/iwlwifi/mvm/sta.h
index 4487cc3e07c1..8d70093847cb 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.h
@@ -343,9 +343,17 @@ struct iwl_mvm_delba_data {
343 u32 baid; 343 u32 baid;
344} __packed; 344} __packed;
345 345
346struct iwl_mvm_delba_notif { 346struct iwl_mvm_nssn_sync_data {
347 u32 baid;
348 u32 nssn;
349} __packed;
350
351struct iwl_mvm_rss_sync_notif {
347 struct iwl_mvm_internal_rxq_notif metadata; 352 struct iwl_mvm_internal_rxq_notif metadata;
348 struct iwl_mvm_delba_data delba; 353 union {
354 struct iwl_mvm_delba_data delba;
355 struct iwl_mvm_nssn_sync_data nssn_sync;
356 };
349} __packed; 357} __packed;
350 358
351/** 359/**
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
index a3e5d88f1c07..6ac114a393cc 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
@@ -831,6 +831,7 @@ iwl_mvm_tx_tso_segment(struct sk_buff *skb, unsigned int num_subframes,
831 unsigned int tcp_payload_len; 831 unsigned int tcp_payload_len;
832 unsigned int mss = skb_shinfo(skb)->gso_size; 832 unsigned int mss = skb_shinfo(skb)->gso_size;
833 bool ipv4 = (skb->protocol == htons(ETH_P_IP)); 833 bool ipv4 = (skb->protocol == htons(ETH_P_IP));
834 bool qos = ieee80211_is_data_qos(hdr->frame_control);
834 u16 ip_base_id = ipv4 ? ntohs(ip_hdr(skb)->id) : 0; 835 u16 ip_base_id = ipv4 ? ntohs(ip_hdr(skb)->id) : 0;
835 836
836 skb_shinfo(skb)->gso_size = num_subframes * mss; 837 skb_shinfo(skb)->gso_size = num_subframes * mss;
@@ -864,7 +865,7 @@ iwl_mvm_tx_tso_segment(struct sk_buff *skb, unsigned int num_subframes,
864 if (tcp_payload_len > mss) { 865 if (tcp_payload_len > mss) {
865 skb_shinfo(tmp)->gso_size = mss; 866 skb_shinfo(tmp)->gso_size = mss;
866 } else { 867 } else {
867 if (ieee80211_is_data_qos(hdr->frame_control)) { 868 if (qos) {
868 u8 *qc; 869 u8 *qc;
869 870
870 if (ipv4) 871 if (ipv4)
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/utils.c b/drivers/net/wireless/intel/iwlwifi/mvm/utils.c
index 9ecd5f09615a..b8e20a01c192 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/utils.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/utils.c
@@ -653,12 +653,12 @@ int iwl_mvm_reconfig_scd(struct iwl_mvm *mvm, int queue, int fifo, int sta_id,
653 * this case to clear the state indicating that station creation is in 653 * this case to clear the state indicating that station creation is in
654 * progress. 654 * progress.
655 */ 655 */
656int iwl_mvm_send_lq_cmd(struct iwl_mvm *mvm, struct iwl_lq_cmd *lq, bool sync) 656int iwl_mvm_send_lq_cmd(struct iwl_mvm *mvm, struct iwl_lq_cmd *lq)
657{ 657{
658 struct iwl_host_cmd cmd = { 658 struct iwl_host_cmd cmd = {
659 .id = LQ_CMD, 659 .id = LQ_CMD,
660 .len = { sizeof(struct iwl_lq_cmd), }, 660 .len = { sizeof(struct iwl_lq_cmd), },
661 .flags = sync ? 0 : CMD_ASYNC, 661 .flags = CMD_ASYNC,
662 .data = { lq, }, 662 .data = { lq, },
663 }; 663 };
664 664
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
index ea2a03d4bf55..d9ed53b7c768 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
@@ -604,10 +604,13 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
604 {IWL_PCI_DEVICE(0x2526, 0x40A4, iwl9460_2ac_cfg)}, 604 {IWL_PCI_DEVICE(0x2526, 0x40A4, iwl9460_2ac_cfg)},
605 {IWL_PCI_DEVICE(0x2526, 0x4234, iwl9560_2ac_cfg_soc)}, 605 {IWL_PCI_DEVICE(0x2526, 0x4234, iwl9560_2ac_cfg_soc)},
606 {IWL_PCI_DEVICE(0x2526, 0x42A4, iwl9462_2ac_cfg_soc)}, 606 {IWL_PCI_DEVICE(0x2526, 0x42A4, iwl9462_2ac_cfg_soc)},
607 {IWL_PCI_DEVICE(0x2526, 0x6010, iwl9260_2ac_160_cfg)},
607 {IWL_PCI_DEVICE(0x2526, 0x6014, iwl9260_2ac_160_cfg)}, 608 {IWL_PCI_DEVICE(0x2526, 0x6014, iwl9260_2ac_160_cfg)},
608 {IWL_PCI_DEVICE(0x2526, 0x8014, iwl9260_2ac_160_cfg)}, 609 {IWL_PCI_DEVICE(0x2526, 0x8014, iwl9260_2ac_160_cfg)},
609 {IWL_PCI_DEVICE(0x2526, 0x8010, iwl9260_2ac_160_cfg)}, 610 {IWL_PCI_DEVICE(0x2526, 0x8010, iwl9260_2ac_160_cfg)},
610 {IWL_PCI_DEVICE(0x2526, 0xA014, iwl9260_2ac_160_cfg)}, 611 {IWL_PCI_DEVICE(0x2526, 0xA014, iwl9260_2ac_160_cfg)},
612 {IWL_PCI_DEVICE(0x2526, 0xE010, iwl9260_2ac_160_cfg)},
613 {IWL_PCI_DEVICE(0x2526, 0xE014, iwl9260_2ac_160_cfg)},
611 {IWL_PCI_DEVICE(0x271B, 0x0010, iwl9160_2ac_cfg)}, 614 {IWL_PCI_DEVICE(0x271B, 0x0010, iwl9160_2ac_cfg)},
612 {IWL_PCI_DEVICE(0x271B, 0x0014, iwl9160_2ac_cfg)}, 615 {IWL_PCI_DEVICE(0x271B, 0x0014, iwl9160_2ac_cfg)},
613 {IWL_PCI_DEVICE(0x271B, 0x0210, iwl9160_2ac_cfg)}, 616 {IWL_PCI_DEVICE(0x271B, 0x0210, iwl9160_2ac_cfg)},
@@ -1059,7 +1062,28 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
1059 iwl_trans->cfg = &iwl9560_2ac_cfg_qu_c0_jf_b0; 1062 iwl_trans->cfg = &iwl9560_2ac_cfg_qu_c0_jf_b0;
1060 else if (iwl_trans->cfg == &iwl9560_2ac_160_cfg_qu_b0_jf_b0) 1063 else if (iwl_trans->cfg == &iwl9560_2ac_160_cfg_qu_b0_jf_b0)
1061 iwl_trans->cfg = &iwl9560_2ac_160_cfg_qu_c0_jf_b0; 1064 iwl_trans->cfg = &iwl9560_2ac_160_cfg_qu_c0_jf_b0;
1065 else if (iwl_trans->cfg == &killer1650s_2ax_cfg_qu_b0_hr_b0)
1066 iwl_trans->cfg = &killer1650s_2ax_cfg_qu_c0_hr_b0;
1067 else if (iwl_trans->cfg == &killer1650i_2ax_cfg_qu_b0_hr_b0)
1068 iwl_trans->cfg = &killer1650i_2ax_cfg_qu_c0_hr_b0;
1062 } 1069 }
1070
1071 /* same thing for QuZ... */
1072 if (iwl_trans->hw_rev == CSR_HW_REV_TYPE_QUZ) {
1073 if (cfg == &iwl_ax101_cfg_qu_hr)
1074 cfg = &iwl_ax101_cfg_quz_hr;
1075 else if (cfg == &iwl_ax201_cfg_qu_hr)
1076 cfg = &iwl_ax201_cfg_quz_hr;
1077 else if (cfg == &iwl9461_2ac_cfg_qu_b0_jf_b0)
1078 cfg = &iwl9461_2ac_cfg_quz_a0_jf_b0_soc;
1079 else if (cfg == &iwl9462_2ac_cfg_qu_b0_jf_b0)
1080 cfg = &iwl9462_2ac_cfg_quz_a0_jf_b0_soc;
1081 else if (cfg == &iwl9560_2ac_cfg_qu_b0_jf_b0)
1082 cfg = &iwl9560_2ac_cfg_quz_a0_jf_b0_soc;
1083 else if (cfg == &iwl9560_2ac_160_cfg_qu_b0_jf_b0)
1084 cfg = &iwl9560_2ac_160_cfg_quz_a0_jf_b0_soc;
1085 }
1086
1063#endif 1087#endif
1064 1088
1065 pci_set_drvdata(pdev, iwl_trans); 1089 pci_set_drvdata(pdev, iwl_trans);
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
index f5df5b370d78..db62c8314603 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
@@ -3602,11 +3602,7 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
3602 } 3602 }
3603 } else if (CSR_HW_RF_ID_TYPE_CHIP_ID(trans->hw_rf_id) == 3603 } else if (CSR_HW_RF_ID_TYPE_CHIP_ID(trans->hw_rf_id) ==
3604 CSR_HW_RF_ID_TYPE_CHIP_ID(CSR_HW_RF_ID_TYPE_HR) && 3604 CSR_HW_RF_ID_TYPE_CHIP_ID(CSR_HW_RF_ID_TYPE_HR) &&
3605 ((trans->cfg != &iwl_ax200_cfg_cc && 3605 trans->hw_rev == CSR_HW_REV_TYPE_QNJ_B0) {
3606 trans->cfg != &killer1650x_2ax_cfg &&
3607 trans->cfg != &killer1650w_2ax_cfg &&
3608 trans->cfg != &iwl_ax201_cfg_quz_hr) ||
3609 trans->hw_rev == CSR_HW_REV_TYPE_QNJ_B0)) {
3610 u32 hw_status; 3606 u32 hw_status;
3611 3607
3612 hw_status = iwl_read_prph(trans, UMAG_GEN_HW_STATUS); 3608 hw_status = iwl_read_prph(trans, UMAG_GEN_HW_STATUS);
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c b/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c
index 38d110338987..9ef6b8fe03c1 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c
@@ -99,10 +99,7 @@ void iwl_pcie_gen2_update_byte_tbl(struct iwl_trans_pcie *trans_pcie,
99 u16 len = byte_cnt; 99 u16 len = byte_cnt;
100 __le16 bc_ent; 100 __le16 bc_ent;
101 101
102 if (trans_pcie->bc_table_dword) 102 if (WARN(idx >= txq->n_window, "%d >= %d\n", idx, txq->n_window))
103 len = DIV_ROUND_UP(len, 4);
104
105 if (WARN_ON(len > 0xFFF || idx >= txq->n_window))
106 return; 103 return;
107 104
108 filled_tfd_size = offsetof(struct iwl_tfh_tfd, tbs) + 105 filled_tfd_size = offsetof(struct iwl_tfh_tfd, tbs) +
@@ -117,11 +114,20 @@ void iwl_pcie_gen2_update_byte_tbl(struct iwl_trans_pcie *trans_pcie,
117 */ 114 */
118 num_fetch_chunks = DIV_ROUND_UP(filled_tfd_size, 64) - 1; 115 num_fetch_chunks = DIV_ROUND_UP(filled_tfd_size, 64) - 1;
119 116
120 bc_ent = cpu_to_le16(len | (num_fetch_chunks << 12)); 117 if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560) {
121 if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560) 118 /* Starting from 22560, the HW expects bytes */
119 WARN_ON(trans_pcie->bc_table_dword);
120 WARN_ON(len > 0x3FFF);
121 bc_ent = cpu_to_le16(len | (num_fetch_chunks << 14));
122 scd_bc_tbl_gen3->tfd_offset[idx] = bc_ent; 122 scd_bc_tbl_gen3->tfd_offset[idx] = bc_ent;
123 else 123 } else {
124 /* Until 22560, the HW expects DW */
125 WARN_ON(!trans_pcie->bc_table_dword);
126 len = DIV_ROUND_UP(len, 4);
127 WARN_ON(len > 0xFFF);
128 bc_ent = cpu_to_le16(len | (num_fetch_chunks << 12));
124 scd_bc_tbl->tfd_offset[idx] = bc_ent; 129 scd_bc_tbl->tfd_offset[idx] = bc_ent;
130 }
125} 131}
126 132
127/* 133/*
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
index fa4245d0d4a8..2f0ba7ef53b8 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
@@ -435,6 +435,8 @@ static void iwl_pcie_tfd_unmap(struct iwl_trans *trans,
435 DMA_TO_DEVICE); 435 DMA_TO_DEVICE);
436 } 436 }
437 437
438 meta->tbs = 0;
439
438 if (trans->cfg->use_tfh) { 440 if (trans->cfg->use_tfh) {
439 struct iwl_tfh_tfd *tfd_fh = (void *)tfd; 441 struct iwl_tfh_tfd *tfd_fh = (void *)tfd;
440 442
diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
index 519b4ee88c5c..772e54f0696f 100644
--- a/drivers/net/wireless/mac80211_hwsim.c
+++ b/drivers/net/wireless/mac80211_hwsim.c
@@ -3617,10 +3617,12 @@ static int hwsim_dump_radio_nl(struct sk_buff *skb,
3617 hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, 3617 hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid,
3618 cb->nlh->nlmsg_seq, &hwsim_genl_family, 3618 cb->nlh->nlmsg_seq, &hwsim_genl_family,
3619 NLM_F_MULTI, HWSIM_CMD_GET_RADIO); 3619 NLM_F_MULTI, HWSIM_CMD_GET_RADIO);
3620 if (!hdr) 3620 if (hdr) {
3621 genl_dump_check_consistent(cb, hdr);
3622 genlmsg_end(skb, hdr);
3623 } else {
3621 res = -EMSGSIZE; 3624 res = -EMSGSIZE;
3622 genl_dump_check_consistent(cb, hdr); 3625 }
3623 genlmsg_end(skb, hdr);
3624 } 3626 }
3625 3627
3626done: 3628done:
diff --git a/drivers/net/wireless/marvell/mwifiex/main.h b/drivers/net/wireless/marvell/mwifiex/main.h
index 3e442c7f7882..095837fba300 100644
--- a/drivers/net/wireless/marvell/mwifiex/main.h
+++ b/drivers/net/wireless/marvell/mwifiex/main.h
@@ -124,6 +124,7 @@ enum {
124 124
125#define MWIFIEX_MAX_TOTAL_SCAN_TIME (MWIFIEX_TIMER_10S - MWIFIEX_TIMER_1S) 125#define MWIFIEX_MAX_TOTAL_SCAN_TIME (MWIFIEX_TIMER_10S - MWIFIEX_TIMER_1S)
126 126
127#define WPA_GTK_OUI_OFFSET 2
127#define RSN_GTK_OUI_OFFSET 2 128#define RSN_GTK_OUI_OFFSET 2
128 129
129#define MWIFIEX_OUI_NOT_PRESENT 0 130#define MWIFIEX_OUI_NOT_PRESENT 0
diff --git a/drivers/net/wireless/marvell/mwifiex/scan.c b/drivers/net/wireless/marvell/mwifiex/scan.c
index 0d6d41727037..21dda385f6c6 100644
--- a/drivers/net/wireless/marvell/mwifiex/scan.c
+++ b/drivers/net/wireless/marvell/mwifiex/scan.c
@@ -181,7 +181,8 @@ mwifiex_is_wpa_oui_present(struct mwifiex_bssdescriptor *bss_desc, u32 cipher)
181 u8 ret = MWIFIEX_OUI_NOT_PRESENT; 181 u8 ret = MWIFIEX_OUI_NOT_PRESENT;
182 182
183 if (has_vendor_hdr(bss_desc->bcn_wpa_ie, WLAN_EID_VENDOR_SPECIFIC)) { 183 if (has_vendor_hdr(bss_desc->bcn_wpa_ie, WLAN_EID_VENDOR_SPECIFIC)) {
184 iebody = (struct ie_body *) bss_desc->bcn_wpa_ie->data; 184 iebody = (struct ie_body *)((u8 *)bss_desc->bcn_wpa_ie->data +
185 WPA_GTK_OUI_OFFSET);
185 oui = &mwifiex_wpa_oui[cipher][0]; 186 oui = &mwifiex_wpa_oui[cipher][0];
186 ret = mwifiex_search_oui_in_ie(iebody, oui); 187 ret = mwifiex_search_oui_in_ie(iebody, oui);
187 if (ret) 188 if (ret)
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/usb.c b/drivers/net/wireless/mediatek/mt76/mt76x0/usb.c
index 627ed1fc7b15..645f4d15fb61 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x0/usb.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/usb.c
@@ -136,11 +136,11 @@ static const struct ieee80211_ops mt76x0u_ops = {
136 .release_buffered_frames = mt76_release_buffered_frames, 136 .release_buffered_frames = mt76_release_buffered_frames,
137}; 137};
138 138
139static int mt76x0u_init_hardware(struct mt76x02_dev *dev) 139static int mt76x0u_init_hardware(struct mt76x02_dev *dev, bool reset)
140{ 140{
141 int err; 141 int err;
142 142
143 mt76x0_chip_onoff(dev, true, true); 143 mt76x0_chip_onoff(dev, true, reset);
144 144
145 if (!mt76x02_wait_for_mac(&dev->mt76)) 145 if (!mt76x02_wait_for_mac(&dev->mt76))
146 return -ETIMEDOUT; 146 return -ETIMEDOUT;
@@ -173,7 +173,7 @@ static int mt76x0u_register_device(struct mt76x02_dev *dev)
173 if (err < 0) 173 if (err < 0)
174 goto out_err; 174 goto out_err;
175 175
176 err = mt76x0u_init_hardware(dev); 176 err = mt76x0u_init_hardware(dev, true);
177 if (err < 0) 177 if (err < 0)
178 goto out_err; 178 goto out_err;
179 179
@@ -309,7 +309,7 @@ static int __maybe_unused mt76x0_resume(struct usb_interface *usb_intf)
309 if (ret < 0) 309 if (ret < 0)
310 goto err; 310 goto err;
311 311
312 ret = mt76x0u_init_hardware(dev); 312 ret = mt76x0u_init_hardware(dev, false);
313 if (ret) 313 if (ret)
314 goto err; 314 goto err;
315 315
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2800lib.c b/drivers/net/wireless/ralink/rt2x00/rt2800lib.c
index c9b957ac5733..ecbe78b8027b 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2800lib.c
+++ b/drivers/net/wireless/ralink/rt2x00/rt2800lib.c
@@ -6095,6 +6095,15 @@ static int rt2800_init_registers(struct rt2x00_dev *rt2x00dev)
6095 } 6095 }
6096 6096
6097 /* 6097 /*
6098 * Clear encryption initialization vectors on start, but keep them
6099 * for watchdog reset. Otherwise we will have wrong IVs and not be
6100 * able to keep connections after reset.
6101 */
6102 if (!test_bit(DEVICE_STATE_RESET, &rt2x00dev->flags))
6103 for (i = 0; i < 256; i++)
6104 rt2800_register_write(rt2x00dev, MAC_IVEIV_ENTRY(i), 0);
6105
6106 /*
6098 * Clear all beacons 6107 * Clear all beacons
6099 */ 6108 */
6100 for (i = 0; i < 8; i++) 6109 for (i = 0; i < 8; i++)
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00.h b/drivers/net/wireless/ralink/rt2x00/rt2x00.h
index 7e43690a861c..2b216edd0c7d 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2x00.h
+++ b/drivers/net/wireless/ralink/rt2x00/rt2x00.h
@@ -658,6 +658,7 @@ enum rt2x00_state_flags {
658 DEVICE_STATE_ENABLED_RADIO, 658 DEVICE_STATE_ENABLED_RADIO,
659 DEVICE_STATE_SCANNING, 659 DEVICE_STATE_SCANNING,
660 DEVICE_STATE_FLUSHING, 660 DEVICE_STATE_FLUSHING,
661 DEVICE_STATE_RESET,
661 662
662 /* 663 /*
663 * Driver configuration 664 * Driver configuration
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00dev.c b/drivers/net/wireless/ralink/rt2x00/rt2x00dev.c
index 35414f97a978..9d158237ac67 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2x00dev.c
+++ b/drivers/net/wireless/ralink/rt2x00/rt2x00dev.c
@@ -1256,13 +1256,14 @@ static int rt2x00lib_initialize(struct rt2x00_dev *rt2x00dev)
1256 1256
1257int rt2x00lib_start(struct rt2x00_dev *rt2x00dev) 1257int rt2x00lib_start(struct rt2x00_dev *rt2x00dev)
1258{ 1258{
1259 int retval; 1259 int retval = 0;
1260 1260
1261 if (test_bit(DEVICE_STATE_STARTED, &rt2x00dev->flags)) { 1261 if (test_bit(DEVICE_STATE_STARTED, &rt2x00dev->flags)) {
1262 /* 1262 /*
1263 * This is special case for ieee80211_restart_hw(), otherwise 1263 * This is special case for ieee80211_restart_hw(), otherwise
1264 * mac80211 never call start() two times in row without stop(); 1264 * mac80211 never call start() two times in row without stop();
1265 */ 1265 */
1266 set_bit(DEVICE_STATE_RESET, &rt2x00dev->flags);
1266 rt2x00dev->ops->lib->pre_reset_hw(rt2x00dev); 1267 rt2x00dev->ops->lib->pre_reset_hw(rt2x00dev);
1267 rt2x00lib_stop(rt2x00dev); 1268 rt2x00lib_stop(rt2x00dev);
1268 } 1269 }
@@ -1273,14 +1274,14 @@ int rt2x00lib_start(struct rt2x00_dev *rt2x00dev)
1273 */ 1274 */
1274 retval = rt2x00lib_load_firmware(rt2x00dev); 1275 retval = rt2x00lib_load_firmware(rt2x00dev);
1275 if (retval) 1276 if (retval)
1276 return retval; 1277 goto out;
1277 1278
1278 /* 1279 /*
1279 * Initialize the device. 1280 * Initialize the device.
1280 */ 1281 */
1281 retval = rt2x00lib_initialize(rt2x00dev); 1282 retval = rt2x00lib_initialize(rt2x00dev);
1282 if (retval) 1283 if (retval)
1283 return retval; 1284 goto out;
1284 1285
1285 rt2x00dev->intf_ap_count = 0; 1286 rt2x00dev->intf_ap_count = 0;
1286 rt2x00dev->intf_sta_count = 0; 1287 rt2x00dev->intf_sta_count = 0;
@@ -1289,11 +1290,13 @@ int rt2x00lib_start(struct rt2x00_dev *rt2x00dev)
1289 /* Enable the radio */ 1290 /* Enable the radio */
1290 retval = rt2x00lib_enable_radio(rt2x00dev); 1291 retval = rt2x00lib_enable_radio(rt2x00dev);
1291 if (retval) 1292 if (retval)
1292 return retval; 1293 goto out;
1293 1294
1294 set_bit(DEVICE_STATE_STARTED, &rt2x00dev->flags); 1295 set_bit(DEVICE_STATE_STARTED, &rt2x00dev->flags);
1295 1296
1296 return 0; 1297out:
1298 clear_bit(DEVICE_STATE_RESET, &rt2x00dev->flags);
1299 return retval;
1297} 1300}
1298 1301
1299void rt2x00lib_stop(struct rt2x00_dev *rt2x00dev) 1302void rt2x00lib_stop(struct rt2x00_dev *rt2x00dev)
diff --git a/drivers/net/wireless/ti/wlcore/vendor_cmd.c b/drivers/net/wireless/ti/wlcore/vendor_cmd.c
index 5cf0b32c413b..e1bd344c4ebc 100644
--- a/drivers/net/wireless/ti/wlcore/vendor_cmd.c
+++ b/drivers/net/wireless/ti/wlcore/vendor_cmd.c
@@ -163,6 +163,7 @@ static const struct wiphy_vendor_command wlcore_vendor_commands[] = {
163 .flags = WIPHY_VENDOR_CMD_NEED_NETDEV | 163 .flags = WIPHY_VENDOR_CMD_NEED_NETDEV |
164 WIPHY_VENDOR_CMD_NEED_RUNNING, 164 WIPHY_VENDOR_CMD_NEED_RUNNING,
165 .doit = wlcore_vendor_cmd_smart_config_start, 165 .doit = wlcore_vendor_cmd_smart_config_start,
166 .policy = wlcore_vendor_attr_policy,
166 }, 167 },
167 { 168 {
168 .info = { 169 .info = {
@@ -172,6 +173,7 @@ static const struct wiphy_vendor_command wlcore_vendor_commands[] = {
172 .flags = WIPHY_VENDOR_CMD_NEED_NETDEV | 173 .flags = WIPHY_VENDOR_CMD_NEED_NETDEV |
173 WIPHY_VENDOR_CMD_NEED_RUNNING, 174 WIPHY_VENDOR_CMD_NEED_RUNNING,
174 .doit = wlcore_vendor_cmd_smart_config_stop, 175 .doit = wlcore_vendor_cmd_smart_config_stop,
176 .policy = wlcore_vendor_attr_policy,
175 }, 177 },
176 { 178 {
177 .info = { 179 .info = {
@@ -181,6 +183,7 @@ static const struct wiphy_vendor_command wlcore_vendor_commands[] = {
181 .flags = WIPHY_VENDOR_CMD_NEED_NETDEV | 183 .flags = WIPHY_VENDOR_CMD_NEED_NETDEV |
182 WIPHY_VENDOR_CMD_NEED_RUNNING, 184 WIPHY_VENDOR_CMD_NEED_RUNNING,
183 .doit = wlcore_vendor_cmd_smart_config_set_group_key, 185 .doit = wlcore_vendor_cmd_smart_config_set_group_key,
186 .policy = wlcore_vendor_attr_policy,
184 }, 187 },
185}; 188};
186 189
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
index 1d9940d4e8c7..c9262ffeefe4 100644
--- a/drivers/net/xen-netback/netback.c
+++ b/drivers/net/xen-netback/netback.c
@@ -925,6 +925,7 @@ static void xenvif_tx_build_gops(struct xenvif_queue *queue,
925 skb_shinfo(skb)->nr_frags = MAX_SKB_FRAGS; 925 skb_shinfo(skb)->nr_frags = MAX_SKB_FRAGS;
926 nskb = xenvif_alloc_skb(0); 926 nskb = xenvif_alloc_skb(0);
927 if (unlikely(nskb == NULL)) { 927 if (unlikely(nskb == NULL)) {
928 skb_shinfo(skb)->nr_frags = 0;
928 kfree_skb(skb); 929 kfree_skb(skb);
929 xenvif_tx_err(queue, &txreq, extra_count, idx); 930 xenvif_tx_err(queue, &txreq, extra_count, idx);
930 if (net_ratelimit()) 931 if (net_ratelimit())
@@ -940,6 +941,7 @@ static void xenvif_tx_build_gops(struct xenvif_queue *queue,
940 941
941 if (xenvif_set_skb_gso(queue->vif, skb, gso)) { 942 if (xenvif_set_skb_gso(queue->vif, skb, gso)) {
942 /* Failure in xenvif_set_skb_gso is fatal. */ 943 /* Failure in xenvif_set_skb_gso is fatal. */
944 skb_shinfo(skb)->nr_frags = 0;
943 kfree_skb(skb); 945 kfree_skb(skb);
944 kfree_skb(nskb); 946 kfree_skb(nskb);
945 break; 947 break;
diff --git a/drivers/nfc/nfcmrvl/main.c b/drivers/nfc/nfcmrvl/main.c
index e65d027b91fa..529be35ac178 100644
--- a/drivers/nfc/nfcmrvl/main.c
+++ b/drivers/nfc/nfcmrvl/main.c
@@ -244,7 +244,7 @@ void nfcmrvl_chip_reset(struct nfcmrvl_private *priv)
244 /* Reset possible fault of previous session */ 244 /* Reset possible fault of previous session */
245 clear_bit(NFCMRVL_PHY_ERROR, &priv->flags); 245 clear_bit(NFCMRVL_PHY_ERROR, &priv->flags);
246 246
247 if (priv->config.reset_n_io) { 247 if (gpio_is_valid(priv->config.reset_n_io)) {
248 nfc_info(priv->dev, "reset the chip\n"); 248 nfc_info(priv->dev, "reset the chip\n");
249 gpio_set_value(priv->config.reset_n_io, 0); 249 gpio_set_value(priv->config.reset_n_io, 0);
250 usleep_range(5000, 10000); 250 usleep_range(5000, 10000);
@@ -255,7 +255,7 @@ void nfcmrvl_chip_reset(struct nfcmrvl_private *priv)
255 255
256void nfcmrvl_chip_halt(struct nfcmrvl_private *priv) 256void nfcmrvl_chip_halt(struct nfcmrvl_private *priv)
257{ 257{
258 if (priv->config.reset_n_io) 258 if (gpio_is_valid(priv->config.reset_n_io))
259 gpio_set_value(priv->config.reset_n_io, 0); 259 gpio_set_value(priv->config.reset_n_io, 0);
260} 260}
261 261
diff --git a/drivers/nfc/nfcmrvl/uart.c b/drivers/nfc/nfcmrvl/uart.c
index 9a22056e8d9e..e5a622ce4b95 100644
--- a/drivers/nfc/nfcmrvl/uart.c
+++ b/drivers/nfc/nfcmrvl/uart.c
@@ -26,7 +26,7 @@
26static unsigned int hci_muxed; 26static unsigned int hci_muxed;
27static unsigned int flow_control; 27static unsigned int flow_control;
28static unsigned int break_control; 28static unsigned int break_control;
29static unsigned int reset_n_io; 29static int reset_n_io = -EINVAL;
30 30
31/* 31/*
32** NFCMRVL NCI OPS 32** NFCMRVL NCI OPS
@@ -231,5 +231,5 @@ MODULE_PARM_DESC(break_control, "Tell if UART driver must drive break signal.");
231module_param(hci_muxed, uint, 0); 231module_param(hci_muxed, uint, 0);
232MODULE_PARM_DESC(hci_muxed, "Tell if transport is muxed in HCI one."); 232MODULE_PARM_DESC(hci_muxed, "Tell if transport is muxed in HCI one.");
233 233
234module_param(reset_n_io, uint, 0); 234module_param(reset_n_io, int, 0);
235MODULE_PARM_DESC(reset_n_io, "GPIO that is wired to RESET_N signal."); 235MODULE_PARM_DESC(reset_n_io, "GPIO that is wired to RESET_N signal.");
diff --git a/drivers/nfc/nfcmrvl/usb.c b/drivers/nfc/nfcmrvl/usb.c
index 945cc903d8f1..888e298f610b 100644
--- a/drivers/nfc/nfcmrvl/usb.c
+++ b/drivers/nfc/nfcmrvl/usb.c
@@ -305,6 +305,7 @@ static int nfcmrvl_probe(struct usb_interface *intf,
305 305
306 /* No configuration for USB */ 306 /* No configuration for USB */
307 memset(&config, 0, sizeof(config)); 307 memset(&config, 0, sizeof(config));
308 config.reset_n_io = -EINVAL;
308 309
309 nfc_info(&udev->dev, "intf %p id %p\n", intf, id); 310 nfc_info(&udev->dev, "intf %p id %p\n", intf, id);
310 311
diff --git a/drivers/nfc/st-nci/se.c b/drivers/nfc/st-nci/se.c
index c3e10b6ab3a4..f25f1ec5f9e9 100644
--- a/drivers/nfc/st-nci/se.c
+++ b/drivers/nfc/st-nci/se.c
@@ -333,6 +333,8 @@ static int st_nci_hci_connectivity_event_received(struct nci_dev *ndev,
333 333
334 transaction = (struct nfc_evt_transaction *)devm_kzalloc(dev, 334 transaction = (struct nfc_evt_transaction *)devm_kzalloc(dev,
335 skb->len - 2, GFP_KERNEL); 335 skb->len - 2, GFP_KERNEL);
336 if (!transaction)
337 return -ENOMEM;
336 338
337 transaction->aid_len = skb->data[1]; 339 transaction->aid_len = skb->data[1];
338 memcpy(transaction->aid, &skb->data[2], transaction->aid_len); 340 memcpy(transaction->aid, &skb->data[2], transaction->aid_len);
diff --git a/drivers/nfc/st21nfca/se.c b/drivers/nfc/st21nfca/se.c
index 06fc542fd198..6586378cacb0 100644
--- a/drivers/nfc/st21nfca/se.c
+++ b/drivers/nfc/st21nfca/se.c
@@ -317,6 +317,8 @@ int st21nfca_connectivity_event_received(struct nfc_hci_dev *hdev, u8 host,
317 317
318 transaction = (struct nfc_evt_transaction *)devm_kzalloc(dev, 318 transaction = (struct nfc_evt_transaction *)devm_kzalloc(dev,
319 skb->len - 2, GFP_KERNEL); 319 skb->len - 2, GFP_KERNEL);
320 if (!transaction)
321 return -ENOMEM;
320 322
321 transaction->aid_len = skb->data[1]; 323 transaction->aid_len = skb->data[1];
322 memcpy(transaction->aid, &skb->data[2], 324 memcpy(transaction->aid, &skb->data[2],
diff --git a/drivers/ntb/msi.c b/drivers/ntb/msi.c
index 9dddf133658f..0a5e884a920c 100644
--- a/drivers/ntb/msi.c
+++ b/drivers/ntb/msi.c
@@ -6,11 +6,6 @@
6#include <linux/msi.h> 6#include <linux/msi.h>
7#include <linux/pci.h> 7#include <linux/pci.h>
8 8
9MODULE_LICENSE("Dual BSD/GPL");
10MODULE_VERSION("0.1");
11MODULE_AUTHOR("Logan Gunthorpe <logang@deltatee.com>");
12MODULE_DESCRIPTION("NTB MSI Interrupt Library");
13
14struct ntb_msi { 9struct ntb_msi {
15 u64 base_addr; 10 u64 base_addr;
16 u64 end_addr; 11 u64 end_addr;
diff --git a/drivers/nvdimm/btt_devs.c b/drivers/nvdimm/btt_devs.c
index 62d00fffa4af..3508a79110c7 100644
--- a/drivers/nvdimm/btt_devs.c
+++ b/drivers/nvdimm/btt_devs.c
@@ -62,14 +62,14 @@ static ssize_t sector_size_store(struct device *dev,
62 struct nd_btt *nd_btt = to_nd_btt(dev); 62 struct nd_btt *nd_btt = to_nd_btt(dev);
63 ssize_t rc; 63 ssize_t rc;
64 64
65 device_lock(dev); 65 nd_device_lock(dev);
66 nvdimm_bus_lock(dev); 66 nvdimm_bus_lock(dev);
67 rc = nd_size_select_store(dev, buf, &nd_btt->lbasize, 67 rc = nd_size_select_store(dev, buf, &nd_btt->lbasize,
68 btt_lbasize_supported); 68 btt_lbasize_supported);
69 dev_dbg(dev, "result: %zd wrote: %s%s", rc, buf, 69 dev_dbg(dev, "result: %zd wrote: %s%s", rc, buf,
70 buf[len - 1] == '\n' ? "" : "\n"); 70 buf[len - 1] == '\n' ? "" : "\n");
71 nvdimm_bus_unlock(dev); 71 nvdimm_bus_unlock(dev);
72 device_unlock(dev); 72 nd_device_unlock(dev);
73 73
74 return rc ? rc : len; 74 return rc ? rc : len;
75} 75}
@@ -91,11 +91,11 @@ static ssize_t uuid_store(struct device *dev,
91 struct nd_btt *nd_btt = to_nd_btt(dev); 91 struct nd_btt *nd_btt = to_nd_btt(dev);
92 ssize_t rc; 92 ssize_t rc;
93 93
94 device_lock(dev); 94 nd_device_lock(dev);
95 rc = nd_uuid_store(dev, &nd_btt->uuid, buf, len); 95 rc = nd_uuid_store(dev, &nd_btt->uuid, buf, len);
96 dev_dbg(dev, "result: %zd wrote: %s%s", rc, buf, 96 dev_dbg(dev, "result: %zd wrote: %s%s", rc, buf,
97 buf[len - 1] == '\n' ? "" : "\n"); 97 buf[len - 1] == '\n' ? "" : "\n");
98 device_unlock(dev); 98 nd_device_unlock(dev);
99 99
100 return rc ? rc : len; 100 return rc ? rc : len;
101} 101}
@@ -120,13 +120,13 @@ static ssize_t namespace_store(struct device *dev,
120 struct nd_btt *nd_btt = to_nd_btt(dev); 120 struct nd_btt *nd_btt = to_nd_btt(dev);
121 ssize_t rc; 121 ssize_t rc;
122 122
123 device_lock(dev); 123 nd_device_lock(dev);
124 nvdimm_bus_lock(dev); 124 nvdimm_bus_lock(dev);
125 rc = nd_namespace_store(dev, &nd_btt->ndns, buf, len); 125 rc = nd_namespace_store(dev, &nd_btt->ndns, buf, len);
126 dev_dbg(dev, "result: %zd wrote: %s%s", rc, buf, 126 dev_dbg(dev, "result: %zd wrote: %s%s", rc, buf,
127 buf[len - 1] == '\n' ? "" : "\n"); 127 buf[len - 1] == '\n' ? "" : "\n");
128 nvdimm_bus_unlock(dev); 128 nvdimm_bus_unlock(dev);
129 device_unlock(dev); 129 nd_device_unlock(dev);
130 130
131 return rc; 131 return rc;
132} 132}
@@ -138,14 +138,14 @@ static ssize_t size_show(struct device *dev,
138 struct nd_btt *nd_btt = to_nd_btt(dev); 138 struct nd_btt *nd_btt = to_nd_btt(dev);
139 ssize_t rc; 139 ssize_t rc;
140 140
141 device_lock(dev); 141 nd_device_lock(dev);
142 if (dev->driver) 142 if (dev->driver)
143 rc = sprintf(buf, "%llu\n", nd_btt->size); 143 rc = sprintf(buf, "%llu\n", nd_btt->size);
144 else { 144 else {
145 /* no size to convey if the btt instance is disabled */ 145 /* no size to convey if the btt instance is disabled */
146 rc = -ENXIO; 146 rc = -ENXIO;
147 } 147 }
148 device_unlock(dev); 148 nd_device_unlock(dev);
149 149
150 return rc; 150 return rc;
151} 151}
diff --git a/drivers/nvdimm/bus.c b/drivers/nvdimm/bus.c
index 2dca3034fee0..798c5c4aea9c 100644
--- a/drivers/nvdimm/bus.c
+++ b/drivers/nvdimm/bus.c
@@ -26,7 +26,7 @@
26 26
27int nvdimm_major; 27int nvdimm_major;
28static int nvdimm_bus_major; 28static int nvdimm_bus_major;
29static struct class *nd_class; 29struct class *nd_class;
30static DEFINE_IDA(nd_ida); 30static DEFINE_IDA(nd_ida);
31 31
32static int to_nd_device_type(struct device *dev) 32static int to_nd_device_type(struct device *dev)
@@ -73,7 +73,7 @@ static void nvdimm_bus_probe_end(struct nvdimm_bus *nvdimm_bus)
73{ 73{
74 nvdimm_bus_lock(&nvdimm_bus->dev); 74 nvdimm_bus_lock(&nvdimm_bus->dev);
75 if (--nvdimm_bus->probe_active == 0) 75 if (--nvdimm_bus->probe_active == 0)
76 wake_up(&nvdimm_bus->probe_wait); 76 wake_up(&nvdimm_bus->wait);
77 nvdimm_bus_unlock(&nvdimm_bus->dev); 77 nvdimm_bus_unlock(&nvdimm_bus->dev);
78} 78}
79 79
@@ -91,7 +91,10 @@ static int nvdimm_bus_probe(struct device *dev)
91 dev->driver->name, dev_name(dev)); 91 dev->driver->name, dev_name(dev));
92 92
93 nvdimm_bus_probe_start(nvdimm_bus); 93 nvdimm_bus_probe_start(nvdimm_bus);
94 debug_nvdimm_lock(dev);
94 rc = nd_drv->probe(dev); 95 rc = nd_drv->probe(dev);
96 debug_nvdimm_unlock(dev);
97
95 if (rc == 0) 98 if (rc == 0)
96 nd_region_probe_success(nvdimm_bus, dev); 99 nd_region_probe_success(nvdimm_bus, dev);
97 else 100 else
@@ -113,8 +116,11 @@ static int nvdimm_bus_remove(struct device *dev)
113 struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(dev); 116 struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(dev);
114 int rc = 0; 117 int rc = 0;
115 118
116 if (nd_drv->remove) 119 if (nd_drv->remove) {
120 debug_nvdimm_lock(dev);
117 rc = nd_drv->remove(dev); 121 rc = nd_drv->remove(dev);
122 debug_nvdimm_unlock(dev);
123 }
118 nd_region_disable(nvdimm_bus, dev); 124 nd_region_disable(nvdimm_bus, dev);
119 125
120 dev_dbg(&nvdimm_bus->dev, "%s.remove(%s) = %d\n", dev->driver->name, 126 dev_dbg(&nvdimm_bus->dev, "%s.remove(%s) = %d\n", dev->driver->name,
@@ -140,7 +146,7 @@ static void nvdimm_bus_shutdown(struct device *dev)
140 146
141void nd_device_notify(struct device *dev, enum nvdimm_event event) 147void nd_device_notify(struct device *dev, enum nvdimm_event event)
142{ 148{
143 device_lock(dev); 149 nd_device_lock(dev);
144 if (dev->driver) { 150 if (dev->driver) {
145 struct nd_device_driver *nd_drv; 151 struct nd_device_driver *nd_drv;
146 152
@@ -148,7 +154,7 @@ void nd_device_notify(struct device *dev, enum nvdimm_event event)
148 if (nd_drv->notify) 154 if (nd_drv->notify)
149 nd_drv->notify(dev, event); 155 nd_drv->notify(dev, event);
150 } 156 }
151 device_unlock(dev); 157 nd_device_unlock(dev);
152} 158}
153EXPORT_SYMBOL(nd_device_notify); 159EXPORT_SYMBOL(nd_device_notify);
154 160
@@ -296,7 +302,7 @@ static void nvdimm_bus_release(struct device *dev)
296 kfree(nvdimm_bus); 302 kfree(nvdimm_bus);
297} 303}
298 304
299static bool is_nvdimm_bus(struct device *dev) 305bool is_nvdimm_bus(struct device *dev)
300{ 306{
301 return dev->release == nvdimm_bus_release; 307 return dev->release == nvdimm_bus_release;
302} 308}
@@ -341,7 +347,7 @@ struct nvdimm_bus *nvdimm_bus_register(struct device *parent,
341 return NULL; 347 return NULL;
342 INIT_LIST_HEAD(&nvdimm_bus->list); 348 INIT_LIST_HEAD(&nvdimm_bus->list);
343 INIT_LIST_HEAD(&nvdimm_bus->mapping_list); 349 INIT_LIST_HEAD(&nvdimm_bus->mapping_list);
344 init_waitqueue_head(&nvdimm_bus->probe_wait); 350 init_waitqueue_head(&nvdimm_bus->wait);
345 nvdimm_bus->id = ida_simple_get(&nd_ida, 0, 0, GFP_KERNEL); 351 nvdimm_bus->id = ida_simple_get(&nd_ida, 0, 0, GFP_KERNEL);
346 if (nvdimm_bus->id < 0) { 352 if (nvdimm_bus->id < 0) {
347 kfree(nvdimm_bus); 353 kfree(nvdimm_bus);
@@ -426,6 +432,9 @@ static int nd_bus_remove(struct device *dev)
426 list_del_init(&nvdimm_bus->list); 432 list_del_init(&nvdimm_bus->list);
427 mutex_unlock(&nvdimm_bus_list_mutex); 433 mutex_unlock(&nvdimm_bus_list_mutex);
428 434
435 wait_event(nvdimm_bus->wait,
436 atomic_read(&nvdimm_bus->ioctl_active) == 0);
437
429 nd_synchronize(); 438 nd_synchronize();
430 device_for_each_child(&nvdimm_bus->dev, NULL, child_unregister); 439 device_for_each_child(&nvdimm_bus->dev, NULL, child_unregister);
431 440
@@ -547,13 +556,38 @@ EXPORT_SYMBOL(nd_device_register);
547 556
548void nd_device_unregister(struct device *dev, enum nd_async_mode mode) 557void nd_device_unregister(struct device *dev, enum nd_async_mode mode)
549{ 558{
559 bool killed;
560
550 switch (mode) { 561 switch (mode) {
551 case ND_ASYNC: 562 case ND_ASYNC:
563 /*
564 * In the async case this is being triggered with the
565 * device lock held and the unregistration work needs to
566 * be moved out of line iff this is thread has won the
567 * race to schedule the deletion.
568 */
569 if (!kill_device(dev))
570 return;
571
552 get_device(dev); 572 get_device(dev);
553 async_schedule_domain(nd_async_device_unregister, dev, 573 async_schedule_domain(nd_async_device_unregister, dev,
554 &nd_async_domain); 574 &nd_async_domain);
555 break; 575 break;
556 case ND_SYNC: 576 case ND_SYNC:
577 /*
578 * In the sync case the device is being unregistered due
579 * to a state change of the parent. Claim the kill state
580 * to synchronize against other unregistration requests,
581 * or otherwise let the async path handle it if the
582 * unregistration was already queued.
583 */
584 nd_device_lock(dev);
585 killed = kill_device(dev);
586 nd_device_unlock(dev);
587
588 if (!killed)
589 return;
590
557 nd_synchronize(); 591 nd_synchronize();
558 device_unregister(dev); 592 device_unregister(dev);
559 break; 593 break;
@@ -859,10 +893,12 @@ void wait_nvdimm_bus_probe_idle(struct device *dev)
859 do { 893 do {
860 if (nvdimm_bus->probe_active == 0) 894 if (nvdimm_bus->probe_active == 0)
861 break; 895 break;
862 nvdimm_bus_unlock(&nvdimm_bus->dev); 896 nvdimm_bus_unlock(dev);
863 wait_event(nvdimm_bus->probe_wait, 897 nd_device_unlock(dev);
898 wait_event(nvdimm_bus->wait,
864 nvdimm_bus->probe_active == 0); 899 nvdimm_bus->probe_active == 0);
865 nvdimm_bus_lock(&nvdimm_bus->dev); 900 nd_device_lock(dev);
901 nvdimm_bus_lock(dev);
866 } while (true); 902 } while (true);
867} 903}
868 904
@@ -945,20 +981,19 @@ static int __nd_ioctl(struct nvdimm_bus *nvdimm_bus, struct nvdimm *nvdimm,
945 int read_only, unsigned int ioctl_cmd, unsigned long arg) 981 int read_only, unsigned int ioctl_cmd, unsigned long arg)
946{ 982{
947 struct nvdimm_bus_descriptor *nd_desc = nvdimm_bus->nd_desc; 983 struct nvdimm_bus_descriptor *nd_desc = nvdimm_bus->nd_desc;
948 static char out_env[ND_CMD_MAX_ENVELOPE];
949 static char in_env[ND_CMD_MAX_ENVELOPE];
950 const struct nd_cmd_desc *desc = NULL; 984 const struct nd_cmd_desc *desc = NULL;
951 unsigned int cmd = _IOC_NR(ioctl_cmd); 985 unsigned int cmd = _IOC_NR(ioctl_cmd);
952 struct device *dev = &nvdimm_bus->dev; 986 struct device *dev = &nvdimm_bus->dev;
953 void __user *p = (void __user *) arg; 987 void __user *p = (void __user *) arg;
988 char *out_env = NULL, *in_env = NULL;
954 const char *cmd_name, *dimm_name; 989 const char *cmd_name, *dimm_name;
955 u32 in_len = 0, out_len = 0; 990 u32 in_len = 0, out_len = 0;
956 unsigned int func = cmd; 991 unsigned int func = cmd;
957 unsigned long cmd_mask; 992 unsigned long cmd_mask;
958 struct nd_cmd_pkg pkg; 993 struct nd_cmd_pkg pkg;
959 int rc, i, cmd_rc; 994 int rc, i, cmd_rc;
995 void *buf = NULL;
960 u64 buf_len = 0; 996 u64 buf_len = 0;
961 void *buf;
962 997
963 if (nvdimm) { 998 if (nvdimm) {
964 desc = nd_cmd_dimm_desc(cmd); 999 desc = nd_cmd_dimm_desc(cmd);
@@ -989,7 +1024,7 @@ static int __nd_ioctl(struct nvdimm_bus *nvdimm_bus, struct nvdimm *nvdimm,
989 case ND_CMD_ARS_START: 1024 case ND_CMD_ARS_START:
990 case ND_CMD_CLEAR_ERROR: 1025 case ND_CMD_CLEAR_ERROR:
991 case ND_CMD_CALL: 1026 case ND_CMD_CALL:
992 dev_dbg(&nvdimm_bus->dev, "'%s' command while read-only.\n", 1027 dev_dbg(dev, "'%s' command while read-only.\n",
993 nvdimm ? nvdimm_cmd_name(cmd) 1028 nvdimm ? nvdimm_cmd_name(cmd)
994 : nvdimm_bus_cmd_name(cmd)); 1029 : nvdimm_bus_cmd_name(cmd));
995 return -EPERM; 1030 return -EPERM;
@@ -998,6 +1033,9 @@ static int __nd_ioctl(struct nvdimm_bus *nvdimm_bus, struct nvdimm *nvdimm,
998 } 1033 }
999 1034
1000 /* process an input envelope */ 1035 /* process an input envelope */
1036 in_env = kzalloc(ND_CMD_MAX_ENVELOPE, GFP_KERNEL);
1037 if (!in_env)
1038 return -ENOMEM;
1001 for (i = 0; i < desc->in_num; i++) { 1039 for (i = 0; i < desc->in_num; i++) {
1002 u32 in_size, copy; 1040 u32 in_size, copy;
1003 1041
@@ -1005,14 +1043,17 @@ static int __nd_ioctl(struct nvdimm_bus *nvdimm_bus, struct nvdimm *nvdimm,
1005 if (in_size == UINT_MAX) { 1043 if (in_size == UINT_MAX) {
1006 dev_err(dev, "%s:%s unknown input size cmd: %s field: %d\n", 1044 dev_err(dev, "%s:%s unknown input size cmd: %s field: %d\n",
1007 __func__, dimm_name, cmd_name, i); 1045 __func__, dimm_name, cmd_name, i);
1008 return -ENXIO; 1046 rc = -ENXIO;
1047 goto out;
1009 } 1048 }
1010 if (in_len < sizeof(in_env)) 1049 if (in_len < ND_CMD_MAX_ENVELOPE)
1011 copy = min_t(u32, sizeof(in_env) - in_len, in_size); 1050 copy = min_t(u32, ND_CMD_MAX_ENVELOPE - in_len, in_size);
1012 else 1051 else
1013 copy = 0; 1052 copy = 0;
1014 if (copy && copy_from_user(&in_env[in_len], p + in_len, copy)) 1053 if (copy && copy_from_user(&in_env[in_len], p + in_len, copy)) {
1015 return -EFAULT; 1054 rc = -EFAULT;
1055 goto out;
1056 }
1016 in_len += in_size; 1057 in_len += in_size;
1017 } 1058 }
1018 1059
@@ -1024,6 +1065,12 @@ static int __nd_ioctl(struct nvdimm_bus *nvdimm_bus, struct nvdimm *nvdimm,
1024 } 1065 }
1025 1066
1026 /* process an output envelope */ 1067 /* process an output envelope */
1068 out_env = kzalloc(ND_CMD_MAX_ENVELOPE, GFP_KERNEL);
1069 if (!out_env) {
1070 rc = -ENOMEM;
1071 goto out;
1072 }
1073
1027 for (i = 0; i < desc->out_num; i++) { 1074 for (i = 0; i < desc->out_num; i++) {
1028 u32 out_size = nd_cmd_out_size(nvdimm, cmd, desc, i, 1075 u32 out_size = nd_cmd_out_size(nvdimm, cmd, desc, i,
1029 (u32 *) in_env, (u32 *) out_env, 0); 1076 (u32 *) in_env, (u32 *) out_env, 0);
@@ -1032,15 +1079,18 @@ static int __nd_ioctl(struct nvdimm_bus *nvdimm_bus, struct nvdimm *nvdimm,
1032 if (out_size == UINT_MAX) { 1079 if (out_size == UINT_MAX) {
1033 dev_dbg(dev, "%s unknown output size cmd: %s field: %d\n", 1080 dev_dbg(dev, "%s unknown output size cmd: %s field: %d\n",
1034 dimm_name, cmd_name, i); 1081 dimm_name, cmd_name, i);
1035 return -EFAULT; 1082 rc = -EFAULT;
1083 goto out;
1036 } 1084 }
1037 if (out_len < sizeof(out_env)) 1085 if (out_len < ND_CMD_MAX_ENVELOPE)
1038 copy = min_t(u32, sizeof(out_env) - out_len, out_size); 1086 copy = min_t(u32, ND_CMD_MAX_ENVELOPE - out_len, out_size);
1039 else 1087 else
1040 copy = 0; 1088 copy = 0;
1041 if (copy && copy_from_user(&out_env[out_len], 1089 if (copy && copy_from_user(&out_env[out_len],
1042 p + in_len + out_len, copy)) 1090 p + in_len + out_len, copy)) {
1043 return -EFAULT; 1091 rc = -EFAULT;
1092 goto out;
1093 }
1044 out_len += out_size; 1094 out_len += out_size;
1045 } 1095 }
1046 1096
@@ -1048,19 +1098,23 @@ static int __nd_ioctl(struct nvdimm_bus *nvdimm_bus, struct nvdimm *nvdimm,
1048 if (buf_len > ND_IOCTL_MAX_BUFLEN) { 1098 if (buf_len > ND_IOCTL_MAX_BUFLEN) {
1049 dev_dbg(dev, "%s cmd: %s buf_len: %llu > %d\n", dimm_name, 1099 dev_dbg(dev, "%s cmd: %s buf_len: %llu > %d\n", dimm_name,
1050 cmd_name, buf_len, ND_IOCTL_MAX_BUFLEN); 1100 cmd_name, buf_len, ND_IOCTL_MAX_BUFLEN);
1051 return -EINVAL; 1101 rc = -EINVAL;
1102 goto out;
1052 } 1103 }
1053 1104
1054 buf = vmalloc(buf_len); 1105 buf = vmalloc(buf_len);
1055 if (!buf) 1106 if (!buf) {
1056 return -ENOMEM; 1107 rc = -ENOMEM;
1108 goto out;
1109 }
1057 1110
1058 if (copy_from_user(buf, p, buf_len)) { 1111 if (copy_from_user(buf, p, buf_len)) {
1059 rc = -EFAULT; 1112 rc = -EFAULT;
1060 goto out; 1113 goto out;
1061 } 1114 }
1062 1115
1063 nvdimm_bus_lock(&nvdimm_bus->dev); 1116 nd_device_lock(dev);
1117 nvdimm_bus_lock(dev);
1064 rc = nd_cmd_clear_to_send(nvdimm_bus, nvdimm, func, buf); 1118 rc = nd_cmd_clear_to_send(nvdimm_bus, nvdimm, func, buf);
1065 if (rc) 1119 if (rc)
1066 goto out_unlock; 1120 goto out_unlock;
@@ -1075,39 +1129,24 @@ static int __nd_ioctl(struct nvdimm_bus *nvdimm_bus, struct nvdimm *nvdimm,
1075 nvdimm_account_cleared_poison(nvdimm_bus, clear_err->address, 1129 nvdimm_account_cleared_poison(nvdimm_bus, clear_err->address,
1076 clear_err->cleared); 1130 clear_err->cleared);
1077 } 1131 }
1078 nvdimm_bus_unlock(&nvdimm_bus->dev);
1079 1132
1080 if (copy_to_user(p, buf, buf_len)) 1133 if (copy_to_user(p, buf, buf_len))
1081 rc = -EFAULT; 1134 rc = -EFAULT;
1082 1135
1083 vfree(buf); 1136out_unlock:
1084 return rc; 1137 nvdimm_bus_unlock(dev);
1085 1138 nd_device_unlock(dev);
1086 out_unlock: 1139out:
1087 nvdimm_bus_unlock(&nvdimm_bus->dev); 1140 kfree(in_env);
1088 out: 1141 kfree(out_env);
1089 vfree(buf); 1142 vfree(buf);
1090 return rc; 1143 return rc;
1091} 1144}
1092 1145
1093static long nd_ioctl(struct file *file, unsigned int cmd, unsigned long arg) 1146enum nd_ioctl_mode {
1094{ 1147 BUS_IOCTL,
1095 long id = (long) file->private_data; 1148 DIMM_IOCTL,
1096 int rc = -ENXIO, ro; 1149};
1097 struct nvdimm_bus *nvdimm_bus;
1098
1099 ro = ((file->f_flags & O_ACCMODE) == O_RDONLY);
1100 mutex_lock(&nvdimm_bus_list_mutex);
1101 list_for_each_entry(nvdimm_bus, &nvdimm_bus_list, list) {
1102 if (nvdimm_bus->id == id) {
1103 rc = __nd_ioctl(nvdimm_bus, NULL, ro, cmd, arg);
1104 break;
1105 }
1106 }
1107 mutex_unlock(&nvdimm_bus_list_mutex);
1108
1109 return rc;
1110}
1111 1150
1112static int match_dimm(struct device *dev, void *data) 1151static int match_dimm(struct device *dev, void *data)
1113{ 1152{
@@ -1122,31 +1161,62 @@ static int match_dimm(struct device *dev, void *data)
1122 return 0; 1161 return 0;
1123} 1162}
1124 1163
1125static long nvdimm_ioctl(struct file *file, unsigned int cmd, unsigned long arg) 1164static long nd_ioctl(struct file *file, unsigned int cmd, unsigned long arg,
1165 enum nd_ioctl_mode mode)
1166
1126{ 1167{
1127 int rc = -ENXIO, ro; 1168 struct nvdimm_bus *nvdimm_bus, *found = NULL;
1128 struct nvdimm_bus *nvdimm_bus; 1169 long id = (long) file->private_data;
1170 struct nvdimm *nvdimm = NULL;
1171 int rc, ro;
1129 1172
1130 ro = ((file->f_flags & O_ACCMODE) == O_RDONLY); 1173 ro = ((file->f_flags & O_ACCMODE) == O_RDONLY);
1131 mutex_lock(&nvdimm_bus_list_mutex); 1174 mutex_lock(&nvdimm_bus_list_mutex);
1132 list_for_each_entry(nvdimm_bus, &nvdimm_bus_list, list) { 1175 list_for_each_entry(nvdimm_bus, &nvdimm_bus_list, list) {
1133 struct device *dev = device_find_child(&nvdimm_bus->dev, 1176 if (mode == DIMM_IOCTL) {
1134 file->private_data, match_dimm); 1177 struct device *dev;
1135 struct nvdimm *nvdimm; 1178
1136 1179 dev = device_find_child(&nvdimm_bus->dev,
1137 if (!dev) 1180 file->private_data, match_dimm);
1138 continue; 1181 if (!dev)
1182 continue;
1183 nvdimm = to_nvdimm(dev);
1184 found = nvdimm_bus;
1185 } else if (nvdimm_bus->id == id) {
1186 found = nvdimm_bus;
1187 }
1139 1188
1140 nvdimm = to_nvdimm(dev); 1189 if (found) {
1141 rc = __nd_ioctl(nvdimm_bus, nvdimm, ro, cmd, arg); 1190 atomic_inc(&nvdimm_bus->ioctl_active);
1142 put_device(dev); 1191 break;
1143 break; 1192 }
1144 } 1193 }
1145 mutex_unlock(&nvdimm_bus_list_mutex); 1194 mutex_unlock(&nvdimm_bus_list_mutex);
1146 1195
1196 if (!found)
1197 return -ENXIO;
1198
1199 nvdimm_bus = found;
1200 rc = __nd_ioctl(nvdimm_bus, nvdimm, ro, cmd, arg);
1201
1202 if (nvdimm)
1203 put_device(&nvdimm->dev);
1204 if (atomic_dec_and_test(&nvdimm_bus->ioctl_active))
1205 wake_up(&nvdimm_bus->wait);
1206
1147 return rc; 1207 return rc;
1148} 1208}
1149 1209
1210static long bus_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
1211{
1212 return nd_ioctl(file, cmd, arg, BUS_IOCTL);
1213}
1214
1215static long dimm_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
1216{
1217 return nd_ioctl(file, cmd, arg, DIMM_IOCTL);
1218}
1219
1150static int nd_open(struct inode *inode, struct file *file) 1220static int nd_open(struct inode *inode, struct file *file)
1151{ 1221{
1152 long minor = iminor(inode); 1222 long minor = iminor(inode);
@@ -1158,16 +1228,16 @@ static int nd_open(struct inode *inode, struct file *file)
1158static const struct file_operations nvdimm_bus_fops = { 1228static const struct file_operations nvdimm_bus_fops = {
1159 .owner = THIS_MODULE, 1229 .owner = THIS_MODULE,
1160 .open = nd_open, 1230 .open = nd_open,
1161 .unlocked_ioctl = nd_ioctl, 1231 .unlocked_ioctl = bus_ioctl,
1162 .compat_ioctl = nd_ioctl, 1232 .compat_ioctl = bus_ioctl,
1163 .llseek = noop_llseek, 1233 .llseek = noop_llseek,
1164}; 1234};
1165 1235
1166static const struct file_operations nvdimm_fops = { 1236static const struct file_operations nvdimm_fops = {
1167 .owner = THIS_MODULE, 1237 .owner = THIS_MODULE,
1168 .open = nd_open, 1238 .open = nd_open,
1169 .unlocked_ioctl = nvdimm_ioctl, 1239 .unlocked_ioctl = dimm_ioctl,
1170 .compat_ioctl = nvdimm_ioctl, 1240 .compat_ioctl = dimm_ioctl,
1171 .llseek = noop_llseek, 1241 .llseek = noop_llseek,
1172}; 1242};
1173 1243
diff --git a/drivers/nvdimm/core.c b/drivers/nvdimm/core.c
index 5e1f060547bf..9204f1e9fd14 100644
--- a/drivers/nvdimm/core.c
+++ b/drivers/nvdimm/core.c
@@ -246,7 +246,7 @@ static int nd_uuid_parse(struct device *dev, u8 *uuid_out, const char *buf,
246 * 246 *
247 * Enforce that uuids can only be changed while the device is disabled 247 * Enforce that uuids can only be changed while the device is disabled
248 * (driver detached) 248 * (driver detached)
249 * LOCKING: expects device_lock() is held on entry 249 * LOCKING: expects nd_device_lock() is held on entry
250 */ 250 */
251int nd_uuid_store(struct device *dev, u8 **uuid_out, const char *buf, 251int nd_uuid_store(struct device *dev, u8 **uuid_out, const char *buf,
252 size_t len) 252 size_t len)
@@ -347,15 +347,15 @@ static DEVICE_ATTR_RO(provider);
347 347
348static int flush_namespaces(struct device *dev, void *data) 348static int flush_namespaces(struct device *dev, void *data)
349{ 349{
350 device_lock(dev); 350 nd_device_lock(dev);
351 device_unlock(dev); 351 nd_device_unlock(dev);
352 return 0; 352 return 0;
353} 353}
354 354
355static int flush_regions_dimms(struct device *dev, void *data) 355static int flush_regions_dimms(struct device *dev, void *data)
356{ 356{
357 device_lock(dev); 357 nd_device_lock(dev);
358 device_unlock(dev); 358 nd_device_unlock(dev);
359 device_for_each_child(dev, NULL, flush_namespaces); 359 device_for_each_child(dev, NULL, flush_namespaces);
360 return 0; 360 return 0;
361} 361}
diff --git a/drivers/nvdimm/dimm_devs.c b/drivers/nvdimm/dimm_devs.c
index dfecd6e17043..29a065e769ea 100644
--- a/drivers/nvdimm/dimm_devs.c
+++ b/drivers/nvdimm/dimm_devs.c
@@ -484,12 +484,12 @@ static ssize_t security_store(struct device *dev,
484 * done while probing is idle and the DIMM is not in active use 484 * done while probing is idle and the DIMM is not in active use
485 * in any region. 485 * in any region.
486 */ 486 */
487 device_lock(dev); 487 nd_device_lock(dev);
488 nvdimm_bus_lock(dev); 488 nvdimm_bus_lock(dev);
489 wait_nvdimm_bus_probe_idle(dev); 489 wait_nvdimm_bus_probe_idle(dev);
490 rc = __security_store(dev, buf, len); 490 rc = __security_store(dev, buf, len);
491 nvdimm_bus_unlock(dev); 491 nvdimm_bus_unlock(dev);
492 device_unlock(dev); 492 nd_device_unlock(dev);
493 493
494 return rc; 494 return rc;
495} 495}
diff --git a/drivers/nvdimm/namespace_devs.c b/drivers/nvdimm/namespace_devs.c
index 2d8d7e554877..a16e52251a30 100644
--- a/drivers/nvdimm/namespace_devs.c
+++ b/drivers/nvdimm/namespace_devs.c
@@ -410,7 +410,7 @@ static ssize_t alt_name_store(struct device *dev,
410 struct nd_region *nd_region = to_nd_region(dev->parent); 410 struct nd_region *nd_region = to_nd_region(dev->parent);
411 ssize_t rc; 411 ssize_t rc;
412 412
413 device_lock(dev); 413 nd_device_lock(dev);
414 nvdimm_bus_lock(dev); 414 nvdimm_bus_lock(dev);
415 wait_nvdimm_bus_probe_idle(dev); 415 wait_nvdimm_bus_probe_idle(dev);
416 rc = __alt_name_store(dev, buf, len); 416 rc = __alt_name_store(dev, buf, len);
@@ -418,7 +418,7 @@ static ssize_t alt_name_store(struct device *dev,
418 rc = nd_namespace_label_update(nd_region, dev); 418 rc = nd_namespace_label_update(nd_region, dev);
419 dev_dbg(dev, "%s(%zd)\n", rc < 0 ? "fail " : "", rc); 419 dev_dbg(dev, "%s(%zd)\n", rc < 0 ? "fail " : "", rc);
420 nvdimm_bus_unlock(dev); 420 nvdimm_bus_unlock(dev);
421 device_unlock(dev); 421 nd_device_unlock(dev);
422 422
423 return rc < 0 ? rc : len; 423 return rc < 0 ? rc : len;
424} 424}
@@ -1077,7 +1077,7 @@ static ssize_t size_store(struct device *dev,
1077 if (rc) 1077 if (rc)
1078 return rc; 1078 return rc;
1079 1079
1080 device_lock(dev); 1080 nd_device_lock(dev);
1081 nvdimm_bus_lock(dev); 1081 nvdimm_bus_lock(dev);
1082 wait_nvdimm_bus_probe_idle(dev); 1082 wait_nvdimm_bus_probe_idle(dev);
1083 rc = __size_store(dev, val); 1083 rc = __size_store(dev, val);
@@ -1103,7 +1103,7 @@ static ssize_t size_store(struct device *dev,
1103 dev_dbg(dev, "%llx %s (%d)\n", val, rc < 0 ? "fail" : "success", rc); 1103 dev_dbg(dev, "%llx %s (%d)\n", val, rc < 0 ? "fail" : "success", rc);
1104 1104
1105 nvdimm_bus_unlock(dev); 1105 nvdimm_bus_unlock(dev);
1106 device_unlock(dev); 1106 nd_device_unlock(dev);
1107 1107
1108 return rc < 0 ? rc : len; 1108 return rc < 0 ? rc : len;
1109} 1109}
@@ -1286,7 +1286,7 @@ static ssize_t uuid_store(struct device *dev,
1286 } else 1286 } else
1287 return -ENXIO; 1287 return -ENXIO;
1288 1288
1289 device_lock(dev); 1289 nd_device_lock(dev);
1290 nvdimm_bus_lock(dev); 1290 nvdimm_bus_lock(dev);
1291 wait_nvdimm_bus_probe_idle(dev); 1291 wait_nvdimm_bus_probe_idle(dev);
1292 if (to_ndns(dev)->claim) 1292 if (to_ndns(dev)->claim)
@@ -1302,7 +1302,7 @@ static ssize_t uuid_store(struct device *dev,
1302 dev_dbg(dev, "result: %zd wrote: %s%s", rc, buf, 1302 dev_dbg(dev, "result: %zd wrote: %s%s", rc, buf,
1303 buf[len - 1] == '\n' ? "" : "\n"); 1303 buf[len - 1] == '\n' ? "" : "\n");
1304 nvdimm_bus_unlock(dev); 1304 nvdimm_bus_unlock(dev);
1305 device_unlock(dev); 1305 nd_device_unlock(dev);
1306 1306
1307 return rc < 0 ? rc : len; 1307 return rc < 0 ? rc : len;
1308} 1308}
@@ -1376,7 +1376,7 @@ static ssize_t sector_size_store(struct device *dev,
1376 } else 1376 } else
1377 return -ENXIO; 1377 return -ENXIO;
1378 1378
1379 device_lock(dev); 1379 nd_device_lock(dev);
1380 nvdimm_bus_lock(dev); 1380 nvdimm_bus_lock(dev);
1381 if (to_ndns(dev)->claim) 1381 if (to_ndns(dev)->claim)
1382 rc = -EBUSY; 1382 rc = -EBUSY;
@@ -1387,7 +1387,7 @@ static ssize_t sector_size_store(struct device *dev,
1387 dev_dbg(dev, "result: %zd %s: %s%s", rc, rc < 0 ? "tried" : "wrote", 1387 dev_dbg(dev, "result: %zd %s: %s%s", rc, rc < 0 ? "tried" : "wrote",
1388 buf, buf[len - 1] == '\n' ? "" : "\n"); 1388 buf, buf[len - 1] == '\n' ? "" : "\n");
1389 nvdimm_bus_unlock(dev); 1389 nvdimm_bus_unlock(dev);
1390 device_unlock(dev); 1390 nd_device_unlock(dev);
1391 1391
1392 return rc ? rc : len; 1392 return rc ? rc : len;
1393} 1393}
@@ -1502,9 +1502,9 @@ static ssize_t holder_show(struct device *dev,
1502 struct nd_namespace_common *ndns = to_ndns(dev); 1502 struct nd_namespace_common *ndns = to_ndns(dev);
1503 ssize_t rc; 1503 ssize_t rc;
1504 1504
1505 device_lock(dev); 1505 nd_device_lock(dev);
1506 rc = sprintf(buf, "%s\n", ndns->claim ? dev_name(ndns->claim) : ""); 1506 rc = sprintf(buf, "%s\n", ndns->claim ? dev_name(ndns->claim) : "");
1507 device_unlock(dev); 1507 nd_device_unlock(dev);
1508 1508
1509 return rc; 1509 return rc;
1510} 1510}
@@ -1541,7 +1541,7 @@ static ssize_t holder_class_store(struct device *dev,
1541 struct nd_region *nd_region = to_nd_region(dev->parent); 1541 struct nd_region *nd_region = to_nd_region(dev->parent);
1542 ssize_t rc; 1542 ssize_t rc;
1543 1543
1544 device_lock(dev); 1544 nd_device_lock(dev);
1545 nvdimm_bus_lock(dev); 1545 nvdimm_bus_lock(dev);
1546 wait_nvdimm_bus_probe_idle(dev); 1546 wait_nvdimm_bus_probe_idle(dev);
1547 rc = __holder_class_store(dev, buf); 1547 rc = __holder_class_store(dev, buf);
@@ -1549,7 +1549,7 @@ static ssize_t holder_class_store(struct device *dev,
1549 rc = nd_namespace_label_update(nd_region, dev); 1549 rc = nd_namespace_label_update(nd_region, dev);
1550 dev_dbg(dev, "%s(%zd)\n", rc < 0 ? "fail " : "", rc); 1550 dev_dbg(dev, "%s(%zd)\n", rc < 0 ? "fail " : "", rc);
1551 nvdimm_bus_unlock(dev); 1551 nvdimm_bus_unlock(dev);
1552 device_unlock(dev); 1552 nd_device_unlock(dev);
1553 1553
1554 return rc < 0 ? rc : len; 1554 return rc < 0 ? rc : len;
1555} 1555}
@@ -1560,7 +1560,7 @@ static ssize_t holder_class_show(struct device *dev,
1560 struct nd_namespace_common *ndns = to_ndns(dev); 1560 struct nd_namespace_common *ndns = to_ndns(dev);
1561 ssize_t rc; 1561 ssize_t rc;
1562 1562
1563 device_lock(dev); 1563 nd_device_lock(dev);
1564 if (ndns->claim_class == NVDIMM_CCLASS_NONE) 1564 if (ndns->claim_class == NVDIMM_CCLASS_NONE)
1565 rc = sprintf(buf, "\n"); 1565 rc = sprintf(buf, "\n");
1566 else if ((ndns->claim_class == NVDIMM_CCLASS_BTT) || 1566 else if ((ndns->claim_class == NVDIMM_CCLASS_BTT) ||
@@ -1572,7 +1572,7 @@ static ssize_t holder_class_show(struct device *dev,
1572 rc = sprintf(buf, "dax\n"); 1572 rc = sprintf(buf, "dax\n");
1573 else 1573 else
1574 rc = sprintf(buf, "<unknown>\n"); 1574 rc = sprintf(buf, "<unknown>\n");
1575 device_unlock(dev); 1575 nd_device_unlock(dev);
1576 1576
1577 return rc; 1577 return rc;
1578} 1578}
@@ -1586,7 +1586,7 @@ static ssize_t mode_show(struct device *dev,
1586 char *mode; 1586 char *mode;
1587 ssize_t rc; 1587 ssize_t rc;
1588 1588
1589 device_lock(dev); 1589 nd_device_lock(dev);
1590 claim = ndns->claim; 1590 claim = ndns->claim;
1591 if (claim && is_nd_btt(claim)) 1591 if (claim && is_nd_btt(claim))
1592 mode = "safe"; 1592 mode = "safe";
@@ -1599,7 +1599,7 @@ static ssize_t mode_show(struct device *dev,
1599 else 1599 else
1600 mode = "raw"; 1600 mode = "raw";
1601 rc = sprintf(buf, "%s\n", mode); 1601 rc = sprintf(buf, "%s\n", mode);
1602 device_unlock(dev); 1602 nd_device_unlock(dev);
1603 1603
1604 return rc; 1604 return rc;
1605} 1605}
@@ -1703,8 +1703,8 @@ struct nd_namespace_common *nvdimm_namespace_common_probe(struct device *dev)
1703 * Flush any in-progess probes / removals in the driver 1703 * Flush any in-progess probes / removals in the driver
1704 * for the raw personality of this namespace. 1704 * for the raw personality of this namespace.
1705 */ 1705 */
1706 device_lock(&ndns->dev); 1706 nd_device_lock(&ndns->dev);
1707 device_unlock(&ndns->dev); 1707 nd_device_unlock(&ndns->dev);
1708 if (ndns->dev.driver) { 1708 if (ndns->dev.driver) {
1709 dev_dbg(&ndns->dev, "is active, can't bind %s\n", 1709 dev_dbg(&ndns->dev, "is active, can't bind %s\n",
1710 dev_name(dev)); 1710 dev_name(dev));
diff --git a/drivers/nvdimm/nd-core.h b/drivers/nvdimm/nd-core.h
index 391e88de3a29..0ac52b6eb00e 100644
--- a/drivers/nvdimm/nd-core.h
+++ b/drivers/nvdimm/nd-core.h
@@ -9,6 +9,7 @@
9#include <linux/sizes.h> 9#include <linux/sizes.h>
10#include <linux/mutex.h> 10#include <linux/mutex.h>
11#include <linux/nd.h> 11#include <linux/nd.h>
12#include "nd.h"
12 13
13extern struct list_head nvdimm_bus_list; 14extern struct list_head nvdimm_bus_list;
14extern struct mutex nvdimm_bus_list_mutex; 15extern struct mutex nvdimm_bus_list_mutex;
@@ -17,10 +18,11 @@ extern struct workqueue_struct *nvdimm_wq;
17 18
18struct nvdimm_bus { 19struct nvdimm_bus {
19 struct nvdimm_bus_descriptor *nd_desc; 20 struct nvdimm_bus_descriptor *nd_desc;
20 wait_queue_head_t probe_wait; 21 wait_queue_head_t wait;
21 struct list_head list; 22 struct list_head list;
22 struct device dev; 23 struct device dev;
23 int id, probe_active; 24 int id, probe_active;
25 atomic_t ioctl_active;
24 struct list_head mapping_list; 26 struct list_head mapping_list;
25 struct mutex reconfig_mutex; 27 struct mutex reconfig_mutex;
26 struct badrange badrange; 28 struct badrange badrange;
@@ -181,4 +183,71 @@ ssize_t nd_namespace_store(struct device *dev,
181 struct nd_namespace_common **_ndns, const char *buf, 183 struct nd_namespace_common **_ndns, const char *buf,
182 size_t len); 184 size_t len);
183struct nd_pfn *to_nd_pfn_safe(struct device *dev); 185struct nd_pfn *to_nd_pfn_safe(struct device *dev);
186bool is_nvdimm_bus(struct device *dev);
187
188#ifdef CONFIG_PROVE_LOCKING
189extern struct class *nd_class;
190
191enum {
192 LOCK_BUS,
193 LOCK_NDCTL,
194 LOCK_REGION,
195 LOCK_DIMM = LOCK_REGION,
196 LOCK_NAMESPACE,
197 LOCK_CLAIM,
198};
199
200static inline void debug_nvdimm_lock(struct device *dev)
201{
202 if (is_nd_region(dev))
203 mutex_lock_nested(&dev->lockdep_mutex, LOCK_REGION);
204 else if (is_nvdimm(dev))
205 mutex_lock_nested(&dev->lockdep_mutex, LOCK_DIMM);
206 else if (is_nd_btt(dev) || is_nd_pfn(dev) || is_nd_dax(dev))
207 mutex_lock_nested(&dev->lockdep_mutex, LOCK_CLAIM);
208 else if (dev->parent && (is_nd_region(dev->parent)))
209 mutex_lock_nested(&dev->lockdep_mutex, LOCK_NAMESPACE);
210 else if (is_nvdimm_bus(dev))
211 mutex_lock_nested(&dev->lockdep_mutex, LOCK_BUS);
212 else if (dev->class && dev->class == nd_class)
213 mutex_lock_nested(&dev->lockdep_mutex, LOCK_NDCTL);
214 else
215 dev_WARN(dev, "unknown lock level\n");
216}
217
218static inline void debug_nvdimm_unlock(struct device *dev)
219{
220 mutex_unlock(&dev->lockdep_mutex);
221}
222
223static inline void nd_device_lock(struct device *dev)
224{
225 device_lock(dev);
226 debug_nvdimm_lock(dev);
227}
228
229static inline void nd_device_unlock(struct device *dev)
230{
231 debug_nvdimm_unlock(dev);
232 device_unlock(dev);
233}
234#else
235static inline void nd_device_lock(struct device *dev)
236{
237 device_lock(dev);
238}
239
240static inline void nd_device_unlock(struct device *dev)
241{
242 device_unlock(dev);
243}
244
245static inline void debug_nvdimm_lock(struct device *dev)
246{
247}
248
249static inline void debug_nvdimm_unlock(struct device *dev)
250{
251}
252#endif
184#endif /* __ND_CORE_H__ */ 253#endif /* __ND_CORE_H__ */
diff --git a/drivers/nvdimm/pfn_devs.c b/drivers/nvdimm/pfn_devs.c
index df2bdbd22450..3e7b11cf1aae 100644
--- a/drivers/nvdimm/pfn_devs.c
+++ b/drivers/nvdimm/pfn_devs.c
@@ -67,7 +67,7 @@ static ssize_t mode_store(struct device *dev,
67 struct nd_pfn *nd_pfn = to_nd_pfn_safe(dev); 67 struct nd_pfn *nd_pfn = to_nd_pfn_safe(dev);
68 ssize_t rc = 0; 68 ssize_t rc = 0;
69 69
70 device_lock(dev); 70 nd_device_lock(dev);
71 nvdimm_bus_lock(dev); 71 nvdimm_bus_lock(dev);
72 if (dev->driver) 72 if (dev->driver)
73 rc = -EBUSY; 73 rc = -EBUSY;
@@ -89,7 +89,7 @@ static ssize_t mode_store(struct device *dev,
89 dev_dbg(dev, "result: %zd wrote: %s%s", rc, buf, 89 dev_dbg(dev, "result: %zd wrote: %s%s", rc, buf,
90 buf[len - 1] == '\n' ? "" : "\n"); 90 buf[len - 1] == '\n' ? "" : "\n");
91 nvdimm_bus_unlock(dev); 91 nvdimm_bus_unlock(dev);
92 device_unlock(dev); 92 nd_device_unlock(dev);
93 93
94 return rc ? rc : len; 94 return rc ? rc : len;
95} 95}
@@ -132,14 +132,14 @@ static ssize_t align_store(struct device *dev,
132 struct nd_pfn *nd_pfn = to_nd_pfn_safe(dev); 132 struct nd_pfn *nd_pfn = to_nd_pfn_safe(dev);
133 ssize_t rc; 133 ssize_t rc;
134 134
135 device_lock(dev); 135 nd_device_lock(dev);
136 nvdimm_bus_lock(dev); 136 nvdimm_bus_lock(dev);
137 rc = nd_size_select_store(dev, buf, &nd_pfn->align, 137 rc = nd_size_select_store(dev, buf, &nd_pfn->align,
138 nd_pfn_supported_alignments()); 138 nd_pfn_supported_alignments());
139 dev_dbg(dev, "result: %zd wrote: %s%s", rc, buf, 139 dev_dbg(dev, "result: %zd wrote: %s%s", rc, buf,
140 buf[len - 1] == '\n' ? "" : "\n"); 140 buf[len - 1] == '\n' ? "" : "\n");
141 nvdimm_bus_unlock(dev); 141 nvdimm_bus_unlock(dev);
142 device_unlock(dev); 142 nd_device_unlock(dev);
143 143
144 return rc ? rc : len; 144 return rc ? rc : len;
145} 145}
@@ -161,11 +161,11 @@ static ssize_t uuid_store(struct device *dev,
161 struct nd_pfn *nd_pfn = to_nd_pfn_safe(dev); 161 struct nd_pfn *nd_pfn = to_nd_pfn_safe(dev);
162 ssize_t rc; 162 ssize_t rc;
163 163
164 device_lock(dev); 164 nd_device_lock(dev);
165 rc = nd_uuid_store(dev, &nd_pfn->uuid, buf, len); 165 rc = nd_uuid_store(dev, &nd_pfn->uuid, buf, len);
166 dev_dbg(dev, "result: %zd wrote: %s%s", rc, buf, 166 dev_dbg(dev, "result: %zd wrote: %s%s", rc, buf,
167 buf[len - 1] == '\n' ? "" : "\n"); 167 buf[len - 1] == '\n' ? "" : "\n");
168 device_unlock(dev); 168 nd_device_unlock(dev);
169 169
170 return rc ? rc : len; 170 return rc ? rc : len;
171} 171}
@@ -190,13 +190,13 @@ static ssize_t namespace_store(struct device *dev,
190 struct nd_pfn *nd_pfn = to_nd_pfn_safe(dev); 190 struct nd_pfn *nd_pfn = to_nd_pfn_safe(dev);
191 ssize_t rc; 191 ssize_t rc;
192 192
193 device_lock(dev); 193 nd_device_lock(dev);
194 nvdimm_bus_lock(dev); 194 nvdimm_bus_lock(dev);
195 rc = nd_namespace_store(dev, &nd_pfn->ndns, buf, len); 195 rc = nd_namespace_store(dev, &nd_pfn->ndns, buf, len);
196 dev_dbg(dev, "result: %zd wrote: %s%s", rc, buf, 196 dev_dbg(dev, "result: %zd wrote: %s%s", rc, buf,
197 buf[len - 1] == '\n' ? "" : "\n"); 197 buf[len - 1] == '\n' ? "" : "\n");
198 nvdimm_bus_unlock(dev); 198 nvdimm_bus_unlock(dev);
199 device_unlock(dev); 199 nd_device_unlock(dev);
200 200
201 return rc; 201 return rc;
202} 202}
@@ -208,7 +208,7 @@ static ssize_t resource_show(struct device *dev,
208 struct nd_pfn *nd_pfn = to_nd_pfn_safe(dev); 208 struct nd_pfn *nd_pfn = to_nd_pfn_safe(dev);
209 ssize_t rc; 209 ssize_t rc;
210 210
211 device_lock(dev); 211 nd_device_lock(dev);
212 if (dev->driver) { 212 if (dev->driver) {
213 struct nd_pfn_sb *pfn_sb = nd_pfn->pfn_sb; 213 struct nd_pfn_sb *pfn_sb = nd_pfn->pfn_sb;
214 u64 offset = __le64_to_cpu(pfn_sb->dataoff); 214 u64 offset = __le64_to_cpu(pfn_sb->dataoff);
@@ -222,7 +222,7 @@ static ssize_t resource_show(struct device *dev,
222 /* no address to convey if the pfn instance is disabled */ 222 /* no address to convey if the pfn instance is disabled */
223 rc = -ENXIO; 223 rc = -ENXIO;
224 } 224 }
225 device_unlock(dev); 225 nd_device_unlock(dev);
226 226
227 return rc; 227 return rc;
228} 228}
@@ -234,7 +234,7 @@ static ssize_t size_show(struct device *dev,
234 struct nd_pfn *nd_pfn = to_nd_pfn_safe(dev); 234 struct nd_pfn *nd_pfn = to_nd_pfn_safe(dev);
235 ssize_t rc; 235 ssize_t rc;
236 236
237 device_lock(dev); 237 nd_device_lock(dev);
238 if (dev->driver) { 238 if (dev->driver) {
239 struct nd_pfn_sb *pfn_sb = nd_pfn->pfn_sb; 239 struct nd_pfn_sb *pfn_sb = nd_pfn->pfn_sb;
240 u64 offset = __le64_to_cpu(pfn_sb->dataoff); 240 u64 offset = __le64_to_cpu(pfn_sb->dataoff);
@@ -250,7 +250,7 @@ static ssize_t size_show(struct device *dev,
250 /* no size to convey if the pfn instance is disabled */ 250 /* no size to convey if the pfn instance is disabled */
251 rc = -ENXIO; 251 rc = -ENXIO;
252 } 252 }
253 device_unlock(dev); 253 nd_device_unlock(dev);
254 254
255 return rc; 255 return rc;
256} 256}
diff --git a/drivers/nvdimm/pmem.c b/drivers/nvdimm/pmem.c
index 2bf3acd69613..4c121dd03dd9 100644
--- a/drivers/nvdimm/pmem.c
+++ b/drivers/nvdimm/pmem.c
@@ -522,8 +522,8 @@ static int nd_pmem_remove(struct device *dev)
522 nvdimm_namespace_detach_btt(to_nd_btt(dev)); 522 nvdimm_namespace_detach_btt(to_nd_btt(dev));
523 else { 523 else {
524 /* 524 /*
525 * Note, this assumes device_lock() context to not race 525 * Note, this assumes nd_device_lock() context to not
526 * nd_pmem_notify() 526 * race nd_pmem_notify()
527 */ 527 */
528 sysfs_put(pmem->bb_state); 528 sysfs_put(pmem->bb_state);
529 pmem->bb_state = NULL; 529 pmem->bb_state = NULL;
diff --git a/drivers/nvdimm/region.c b/drivers/nvdimm/region.c
index ef46cc3a71ae..37bf8719a2a4 100644
--- a/drivers/nvdimm/region.c
+++ b/drivers/nvdimm/region.c
@@ -34,17 +34,6 @@ static int nd_region_probe(struct device *dev)
34 if (rc) 34 if (rc)
35 return rc; 35 return rc;
36 36
37 rc = nd_region_register_namespaces(nd_region, &err);
38 if (rc < 0)
39 return rc;
40
41 ndrd = dev_get_drvdata(dev);
42 ndrd->ns_active = rc;
43 ndrd->ns_count = rc + err;
44
45 if (rc && err && rc == err)
46 return -ENODEV;
47
48 if (is_nd_pmem(&nd_region->dev)) { 37 if (is_nd_pmem(&nd_region->dev)) {
49 struct resource ndr_res; 38 struct resource ndr_res;
50 39
@@ -60,6 +49,17 @@ static int nd_region_probe(struct device *dev)
60 nvdimm_badblocks_populate(nd_region, &nd_region->bb, &ndr_res); 49 nvdimm_badblocks_populate(nd_region, &nd_region->bb, &ndr_res);
61 } 50 }
62 51
52 rc = nd_region_register_namespaces(nd_region, &err);
53 if (rc < 0)
54 return rc;
55
56 ndrd = dev_get_drvdata(dev);
57 ndrd->ns_active = rc;
58 ndrd->ns_count = rc + err;
59
60 if (rc && err && rc == err)
61 return -ENODEV;
62
63 nd_region->btt_seed = nd_btt_create(nd_region); 63 nd_region->btt_seed = nd_btt_create(nd_region);
64 nd_region->pfn_seed = nd_pfn_create(nd_region); 64 nd_region->pfn_seed = nd_pfn_create(nd_region);
65 nd_region->dax_seed = nd_dax_create(nd_region); 65 nd_region->dax_seed = nd_dax_create(nd_region);
@@ -102,7 +102,7 @@ static int nd_region_remove(struct device *dev)
102 nvdimm_bus_unlock(dev); 102 nvdimm_bus_unlock(dev);
103 103
104 /* 104 /*
105 * Note, this assumes device_lock() context to not race 105 * Note, this assumes nd_device_lock() context to not race
106 * nd_region_notify() 106 * nd_region_notify()
107 */ 107 */
108 sysfs_put(nd_region->bb_state); 108 sysfs_put(nd_region->bb_state);
diff --git a/drivers/nvdimm/region_devs.c b/drivers/nvdimm/region_devs.c
index 56f2227f192a..af30cbe7a8ea 100644
--- a/drivers/nvdimm/region_devs.c
+++ b/drivers/nvdimm/region_devs.c
@@ -331,7 +331,7 @@ static ssize_t set_cookie_show(struct device *dev,
331 * the v1.1 namespace label cookie definition. To read all this 331 * the v1.1 namespace label cookie definition. To read all this
332 * data we need to wait for probing to settle. 332 * data we need to wait for probing to settle.
333 */ 333 */
334 device_lock(dev); 334 nd_device_lock(dev);
335 nvdimm_bus_lock(dev); 335 nvdimm_bus_lock(dev);
336 wait_nvdimm_bus_probe_idle(dev); 336 wait_nvdimm_bus_probe_idle(dev);
337 if (nd_region->ndr_mappings) { 337 if (nd_region->ndr_mappings) {
@@ -348,7 +348,7 @@ static ssize_t set_cookie_show(struct device *dev,
348 } 348 }
349 } 349 }
350 nvdimm_bus_unlock(dev); 350 nvdimm_bus_unlock(dev);
351 device_unlock(dev); 351 nd_device_unlock(dev);
352 352
353 if (rc) 353 if (rc)
354 return rc; 354 return rc;
@@ -424,10 +424,12 @@ static ssize_t available_size_show(struct device *dev,
424 * memory nvdimm_bus_lock() is dropped, but that's userspace's 424 * memory nvdimm_bus_lock() is dropped, but that's userspace's
425 * problem to not race itself. 425 * problem to not race itself.
426 */ 426 */
427 nd_device_lock(dev);
427 nvdimm_bus_lock(dev); 428 nvdimm_bus_lock(dev);
428 wait_nvdimm_bus_probe_idle(dev); 429 wait_nvdimm_bus_probe_idle(dev);
429 available = nd_region_available_dpa(nd_region); 430 available = nd_region_available_dpa(nd_region);
430 nvdimm_bus_unlock(dev); 431 nvdimm_bus_unlock(dev);
432 nd_device_unlock(dev);
431 433
432 return sprintf(buf, "%llu\n", available); 434 return sprintf(buf, "%llu\n", available);
433} 435}
@@ -439,10 +441,12 @@ static ssize_t max_available_extent_show(struct device *dev,
439 struct nd_region *nd_region = to_nd_region(dev); 441 struct nd_region *nd_region = to_nd_region(dev);
440 unsigned long long available = 0; 442 unsigned long long available = 0;
441 443
444 nd_device_lock(dev);
442 nvdimm_bus_lock(dev); 445 nvdimm_bus_lock(dev);
443 wait_nvdimm_bus_probe_idle(dev); 446 wait_nvdimm_bus_probe_idle(dev);
444 available = nd_region_allocatable_dpa(nd_region); 447 available = nd_region_allocatable_dpa(nd_region);
445 nvdimm_bus_unlock(dev); 448 nvdimm_bus_unlock(dev);
449 nd_device_unlock(dev);
446 450
447 return sprintf(buf, "%llu\n", available); 451 return sprintf(buf, "%llu\n", available);
448} 452}
@@ -561,12 +565,12 @@ static ssize_t region_badblocks_show(struct device *dev,
561 struct nd_region *nd_region = to_nd_region(dev); 565 struct nd_region *nd_region = to_nd_region(dev);
562 ssize_t rc; 566 ssize_t rc;
563 567
564 device_lock(dev); 568 nd_device_lock(dev);
565 if (dev->driver) 569 if (dev->driver)
566 rc = badblocks_show(&nd_region->bb, buf, 0); 570 rc = badblocks_show(&nd_region->bb, buf, 0);
567 else 571 else
568 rc = -ENXIO; 572 rc = -ENXIO;
569 device_unlock(dev); 573 nd_device_unlock(dev);
570 574
571 return rc; 575 return rc;
572} 576}
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index cc09b81fc7f4..d3d6b7bd6903 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -1286,6 +1286,9 @@ static u32 nvme_passthru_start(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
1286 */ 1286 */
1287 if (effects & (NVME_CMD_EFFECTS_LBCC | NVME_CMD_EFFECTS_CSE_MASK)) { 1287 if (effects & (NVME_CMD_EFFECTS_LBCC | NVME_CMD_EFFECTS_CSE_MASK)) {
1288 mutex_lock(&ctrl->scan_lock); 1288 mutex_lock(&ctrl->scan_lock);
1289 mutex_lock(&ctrl->subsys->lock);
1290 nvme_mpath_start_freeze(ctrl->subsys);
1291 nvme_mpath_wait_freeze(ctrl->subsys);
1289 nvme_start_freeze(ctrl); 1292 nvme_start_freeze(ctrl);
1290 nvme_wait_freeze(ctrl); 1293 nvme_wait_freeze(ctrl);
1291 } 1294 }
@@ -1316,6 +1319,8 @@ static void nvme_passthru_end(struct nvme_ctrl *ctrl, u32 effects)
1316 nvme_update_formats(ctrl); 1319 nvme_update_formats(ctrl);
1317 if (effects & (NVME_CMD_EFFECTS_LBCC | NVME_CMD_EFFECTS_CSE_MASK)) { 1320 if (effects & (NVME_CMD_EFFECTS_LBCC | NVME_CMD_EFFECTS_CSE_MASK)) {
1318 nvme_unfreeze(ctrl); 1321 nvme_unfreeze(ctrl);
1322 nvme_mpath_unfreeze(ctrl->subsys);
1323 mutex_unlock(&ctrl->subsys->lock);
1319 mutex_unlock(&ctrl->scan_lock); 1324 mutex_unlock(&ctrl->scan_lock);
1320 } 1325 }
1321 if (effects & NVME_CMD_EFFECTS_CCC) 1326 if (effects & NVME_CMD_EFFECTS_CCC)
@@ -1715,6 +1720,7 @@ static void __nvme_revalidate_disk(struct gendisk *disk, struct nvme_id_ns *id)
1715 if (ns->head->disk) { 1720 if (ns->head->disk) {
1716 nvme_update_disk_info(ns->head->disk, ns, id); 1721 nvme_update_disk_info(ns->head->disk, ns, id);
1717 blk_queue_stack_limits(ns->head->disk->queue, ns->queue); 1722 blk_queue_stack_limits(ns->head->disk->queue, ns->queue);
1723 revalidate_disk(ns->head->disk);
1718 } 1724 }
1719#endif 1725#endif
1720} 1726}
@@ -2251,6 +2257,16 @@ static const struct nvme_core_quirk_entry core_quirks[] = {
2251 .vid = 0x1179, 2257 .vid = 0x1179,
2252 .mn = "THNSF5256GPUK TOSHIBA", 2258 .mn = "THNSF5256GPUK TOSHIBA",
2253 .quirks = NVME_QUIRK_NO_APST, 2259 .quirks = NVME_QUIRK_NO_APST,
2260 },
2261 {
2262 /*
2263 * This LiteON CL1-3D*-Q11 firmware version has a race
2264 * condition associated with actions related to suspend to idle
2265 * LiteON has resolved the problem in future firmware
2266 */
2267 .vid = 0x14a4,
2268 .fr = "22301111",
2269 .quirks = NVME_QUIRK_SIMPLE_SUSPEND,
2254 } 2270 }
2255}; 2271};
2256 2272
@@ -2311,17 +2327,15 @@ static void nvme_init_subnqn(struct nvme_subsystem *subsys, struct nvme_ctrl *ct
2311 memset(subsys->subnqn + off, 0, sizeof(subsys->subnqn) - off); 2327 memset(subsys->subnqn + off, 0, sizeof(subsys->subnqn) - off);
2312} 2328}
2313 2329
2314static void __nvme_release_subsystem(struct nvme_subsystem *subsys) 2330static void nvme_release_subsystem(struct device *dev)
2315{ 2331{
2332 struct nvme_subsystem *subsys =
2333 container_of(dev, struct nvme_subsystem, dev);
2334
2316 ida_simple_remove(&nvme_subsystems_ida, subsys->instance); 2335 ida_simple_remove(&nvme_subsystems_ida, subsys->instance);
2317 kfree(subsys); 2336 kfree(subsys);
2318} 2337}
2319 2338
2320static void nvme_release_subsystem(struct device *dev)
2321{
2322 __nvme_release_subsystem(container_of(dev, struct nvme_subsystem, dev));
2323}
2324
2325static void nvme_destroy_subsystem(struct kref *ref) 2339static void nvme_destroy_subsystem(struct kref *ref)
2326{ 2340{
2327 struct nvme_subsystem *subsys = 2341 struct nvme_subsystem *subsys =
@@ -2477,7 +2491,7 @@ static int nvme_init_subsystem(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id)
2477 mutex_lock(&nvme_subsystems_lock); 2491 mutex_lock(&nvme_subsystems_lock);
2478 found = __nvme_find_get_subsystem(subsys->subnqn); 2492 found = __nvme_find_get_subsystem(subsys->subnqn);
2479 if (found) { 2493 if (found) {
2480 __nvme_release_subsystem(subsys); 2494 put_device(&subsys->dev);
2481 subsys = found; 2495 subsys = found;
2482 2496
2483 if (!nvme_validate_cntlid(subsys, ctrl, id)) { 2497 if (!nvme_validate_cntlid(subsys, ctrl, id)) {
@@ -2489,6 +2503,7 @@ static int nvme_init_subsystem(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id)
2489 if (ret) { 2503 if (ret) {
2490 dev_err(ctrl->device, 2504 dev_err(ctrl->device,
2491 "failed to register subsystem device.\n"); 2505 "failed to register subsystem device.\n");
2506 put_device(&subsys->dev);
2492 goto out_unlock; 2507 goto out_unlock;
2493 } 2508 }
2494 ida_init(&subsys->ns_ida); 2509 ida_init(&subsys->ns_ida);
@@ -2511,7 +2526,6 @@ out_put_subsystem:
2511 nvme_put_subsystem(subsys); 2526 nvme_put_subsystem(subsys);
2512out_unlock: 2527out_unlock:
2513 mutex_unlock(&nvme_subsystems_lock); 2528 mutex_unlock(&nvme_subsystems_lock);
2514 put_device(&subsys->dev);
2515 return ret; 2529 return ret;
2516} 2530}
2517 2531
@@ -2593,6 +2607,9 @@ int nvme_init_identify(struct nvme_ctrl *ctrl)
2593 goto out_free; 2607 goto out_free;
2594 } 2608 }
2595 2609
2610 if (!(ctrl->ops->flags & NVME_F_FABRICS))
2611 ctrl->cntlid = le16_to_cpu(id->cntlid);
2612
2596 if (!ctrl->identified) { 2613 if (!ctrl->identified) {
2597 int i; 2614 int i;
2598 2615
@@ -2693,7 +2710,6 @@ int nvme_init_identify(struct nvme_ctrl *ctrl)
2693 goto out_free; 2710 goto out_free;
2694 } 2711 }
2695 } else { 2712 } else {
2696 ctrl->cntlid = le16_to_cpu(id->cntlid);
2697 ctrl->hmpre = le32_to_cpu(id->hmpre); 2713 ctrl->hmpre = le32_to_cpu(id->hmpre);
2698 ctrl->hmmin = le32_to_cpu(id->hmmin); 2714 ctrl->hmmin = le32_to_cpu(id->hmmin);
2699 ctrl->hmminds = le32_to_cpu(id->hmminds); 2715 ctrl->hmminds = le32_to_cpu(id->hmminds);
@@ -3573,6 +3589,13 @@ void nvme_remove_namespaces(struct nvme_ctrl *ctrl)
3573 struct nvme_ns *ns, *next; 3589 struct nvme_ns *ns, *next;
3574 LIST_HEAD(ns_list); 3590 LIST_HEAD(ns_list);
3575 3591
3592 /*
3593 * make sure to requeue I/O to all namespaces as these
3594 * might result from the scan itself and must complete
3595 * for the scan_work to make progress
3596 */
3597 nvme_mpath_clear_ctrl_paths(ctrl);
3598
3576 /* prevent racing with ns scanning */ 3599 /* prevent racing with ns scanning */
3577 flush_work(&ctrl->scan_work); 3600 flush_work(&ctrl->scan_work);
3578 3601
diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c
index a9a927677970..af831d3d15d0 100644
--- a/drivers/nvme/host/multipath.c
+++ b/drivers/nvme/host/multipath.c
@@ -12,9 +12,34 @@ module_param(multipath, bool, 0444);
12MODULE_PARM_DESC(multipath, 12MODULE_PARM_DESC(multipath,
13 "turn on native support for multiple controllers per subsystem"); 13 "turn on native support for multiple controllers per subsystem");
14 14
15inline bool nvme_ctrl_use_ana(struct nvme_ctrl *ctrl) 15void nvme_mpath_unfreeze(struct nvme_subsystem *subsys)
16{ 16{
17 return multipath && ctrl->subsys && (ctrl->subsys->cmic & (1 << 3)); 17 struct nvme_ns_head *h;
18
19 lockdep_assert_held(&subsys->lock);
20 list_for_each_entry(h, &subsys->nsheads, entry)
21 if (h->disk)
22 blk_mq_unfreeze_queue(h->disk->queue);
23}
24
25void nvme_mpath_wait_freeze(struct nvme_subsystem *subsys)
26{
27 struct nvme_ns_head *h;
28
29 lockdep_assert_held(&subsys->lock);
30 list_for_each_entry(h, &subsys->nsheads, entry)
31 if (h->disk)
32 blk_mq_freeze_queue_wait(h->disk->queue);
33}
34
35void nvme_mpath_start_freeze(struct nvme_subsystem *subsys)
36{
37 struct nvme_ns_head *h;
38
39 lockdep_assert_held(&subsys->lock);
40 list_for_each_entry(h, &subsys->nsheads, entry)
41 if (h->disk)
42 blk_freeze_queue_start(h->disk->queue);
18} 43}
19 44
20/* 45/*
@@ -109,18 +134,34 @@ static const char *nvme_ana_state_names[] = {
109 [NVME_ANA_CHANGE] = "change", 134 [NVME_ANA_CHANGE] = "change",
110}; 135};
111 136
112void nvme_mpath_clear_current_path(struct nvme_ns *ns) 137bool nvme_mpath_clear_current_path(struct nvme_ns *ns)
113{ 138{
114 struct nvme_ns_head *head = ns->head; 139 struct nvme_ns_head *head = ns->head;
140 bool changed = false;
115 int node; 141 int node;
116 142
117 if (!head) 143 if (!head)
118 return; 144 goto out;
119 145
120 for_each_node(node) { 146 for_each_node(node) {
121 if (ns == rcu_access_pointer(head->current_path[node])) 147 if (ns == rcu_access_pointer(head->current_path[node])) {
122 rcu_assign_pointer(head->current_path[node], NULL); 148 rcu_assign_pointer(head->current_path[node], NULL);
149 changed = true;
150 }
123 } 151 }
152out:
153 return changed;
154}
155
156void nvme_mpath_clear_ctrl_paths(struct nvme_ctrl *ctrl)
157{
158 struct nvme_ns *ns;
159
160 mutex_lock(&ctrl->scan_lock);
161 list_for_each_entry(ns, &ctrl->namespaces, list)
162 if (nvme_mpath_clear_current_path(ns))
163 kblockd_schedule_work(&ns->head->requeue_work);
164 mutex_unlock(&ctrl->scan_lock);
124} 165}
125 166
126static bool nvme_path_is_disabled(struct nvme_ns *ns) 167static bool nvme_path_is_disabled(struct nvme_ns *ns)
@@ -231,6 +272,24 @@ inline struct nvme_ns *nvme_find_path(struct nvme_ns_head *head)
231 return ns; 272 return ns;
232} 273}
233 274
275static bool nvme_available_path(struct nvme_ns_head *head)
276{
277 struct nvme_ns *ns;
278
279 list_for_each_entry_rcu(ns, &head->list, siblings) {
280 switch (ns->ctrl->state) {
281 case NVME_CTRL_LIVE:
282 case NVME_CTRL_RESETTING:
283 case NVME_CTRL_CONNECTING:
284 /* fallthru */
285 return true;
286 default:
287 break;
288 }
289 }
290 return false;
291}
292
234static blk_qc_t nvme_ns_head_make_request(struct request_queue *q, 293static blk_qc_t nvme_ns_head_make_request(struct request_queue *q,
235 struct bio *bio) 294 struct bio *bio)
236{ 295{
@@ -257,14 +316,14 @@ static blk_qc_t nvme_ns_head_make_request(struct request_queue *q,
257 disk_devt(ns->head->disk), 316 disk_devt(ns->head->disk),
258 bio->bi_iter.bi_sector); 317 bio->bi_iter.bi_sector);
259 ret = direct_make_request(bio); 318 ret = direct_make_request(bio);
260 } else if (!list_empty_careful(&head->list)) { 319 } else if (nvme_available_path(head)) {
261 dev_warn_ratelimited(dev, "no path available - requeuing I/O\n"); 320 dev_warn_ratelimited(dev, "no usable path - requeuing I/O\n");
262 321
263 spin_lock_irq(&head->requeue_lock); 322 spin_lock_irq(&head->requeue_lock);
264 bio_list_add(&head->requeue_list, bio); 323 bio_list_add(&head->requeue_list, bio);
265 spin_unlock_irq(&head->requeue_lock); 324 spin_unlock_irq(&head->requeue_lock);
266 } else { 325 } else {
267 dev_warn_ratelimited(dev, "no path - failing I/O\n"); 326 dev_warn_ratelimited(dev, "no available path - failing I/O\n");
268 327
269 bio->bi_status = BLK_STS_IOERR; 328 bio->bi_status = BLK_STS_IOERR;
270 bio_endio(bio); 329 bio_endio(bio);
@@ -369,6 +428,7 @@ static void nvme_mpath_set_live(struct nvme_ns *ns)
369 srcu_read_unlock(&head->srcu, srcu_idx); 428 srcu_read_unlock(&head->srcu, srcu_idx);
370 } 429 }
371 430
431 synchronize_srcu(&ns->head->srcu);
372 kblockd_schedule_work(&ns->head->requeue_work); 432 kblockd_schedule_work(&ns->head->requeue_work);
373} 433}
374 434
@@ -622,7 +682,8 @@ int nvme_mpath_init(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id)
622{ 682{
623 int error; 683 int error;
624 684
625 if (!nvme_ctrl_use_ana(ctrl)) 685 /* check if multipath is enabled and we have the capability */
686 if (!multipath || !ctrl->subsys || !(ctrl->subsys->cmic & (1 << 3)))
626 return 0; 687 return 0;
627 688
628 ctrl->anacap = id->anacap; 689 ctrl->anacap = id->anacap;
diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
index 716a876119c8..2d678fb968c7 100644
--- a/drivers/nvme/host/nvme.h
+++ b/drivers/nvme/host/nvme.h
@@ -92,6 +92,11 @@ enum nvme_quirks {
92 * Broken Write Zeroes. 92 * Broken Write Zeroes.
93 */ 93 */
94 NVME_QUIRK_DISABLE_WRITE_ZEROES = (1 << 9), 94 NVME_QUIRK_DISABLE_WRITE_ZEROES = (1 << 9),
95
96 /*
97 * Force simple suspend/resume path.
98 */
99 NVME_QUIRK_SIMPLE_SUSPEND = (1 << 10),
95}; 100};
96 101
97/* 102/*
@@ -485,7 +490,14 @@ extern const struct attribute_group *nvme_ns_id_attr_groups[];
485extern const struct block_device_operations nvme_ns_head_ops; 490extern const struct block_device_operations nvme_ns_head_ops;
486 491
487#ifdef CONFIG_NVME_MULTIPATH 492#ifdef CONFIG_NVME_MULTIPATH
488bool nvme_ctrl_use_ana(struct nvme_ctrl *ctrl); 493static inline bool nvme_ctrl_use_ana(struct nvme_ctrl *ctrl)
494{
495 return ctrl->ana_log_buf != NULL;
496}
497
498void nvme_mpath_unfreeze(struct nvme_subsystem *subsys);
499void nvme_mpath_wait_freeze(struct nvme_subsystem *subsys);
500void nvme_mpath_start_freeze(struct nvme_subsystem *subsys);
489void nvme_set_disk_name(char *disk_name, struct nvme_ns *ns, 501void nvme_set_disk_name(char *disk_name, struct nvme_ns *ns,
490 struct nvme_ctrl *ctrl, int *flags); 502 struct nvme_ctrl *ctrl, int *flags);
491void nvme_failover_req(struct request *req); 503void nvme_failover_req(struct request *req);
@@ -496,7 +508,8 @@ void nvme_mpath_remove_disk(struct nvme_ns_head *head);
496int nvme_mpath_init(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id); 508int nvme_mpath_init(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id);
497void nvme_mpath_uninit(struct nvme_ctrl *ctrl); 509void nvme_mpath_uninit(struct nvme_ctrl *ctrl);
498void nvme_mpath_stop(struct nvme_ctrl *ctrl); 510void nvme_mpath_stop(struct nvme_ctrl *ctrl);
499void nvme_mpath_clear_current_path(struct nvme_ns *ns); 511bool nvme_mpath_clear_current_path(struct nvme_ns *ns);
512void nvme_mpath_clear_ctrl_paths(struct nvme_ctrl *ctrl);
500struct nvme_ns *nvme_find_path(struct nvme_ns_head *head); 513struct nvme_ns *nvme_find_path(struct nvme_ns_head *head);
501 514
502static inline void nvme_mpath_check_last_path(struct nvme_ns *ns) 515static inline void nvme_mpath_check_last_path(struct nvme_ns *ns)
@@ -544,7 +557,11 @@ static inline void nvme_mpath_add_disk(struct nvme_ns *ns,
544static inline void nvme_mpath_remove_disk(struct nvme_ns_head *head) 557static inline void nvme_mpath_remove_disk(struct nvme_ns_head *head)
545{ 558{
546} 559}
547static inline void nvme_mpath_clear_current_path(struct nvme_ns *ns) 560static inline bool nvme_mpath_clear_current_path(struct nvme_ns *ns)
561{
562 return false;
563}
564static inline void nvme_mpath_clear_ctrl_paths(struct nvme_ctrl *ctrl)
548{ 565{
549} 566}
550static inline void nvme_mpath_check_last_path(struct nvme_ns *ns) 567static inline void nvme_mpath_check_last_path(struct nvme_ns *ns)
@@ -564,6 +581,15 @@ static inline void nvme_mpath_uninit(struct nvme_ctrl *ctrl)
564static inline void nvme_mpath_stop(struct nvme_ctrl *ctrl) 581static inline void nvme_mpath_stop(struct nvme_ctrl *ctrl)
565{ 582{
566} 583}
584static inline void nvme_mpath_unfreeze(struct nvme_subsystem *subsys)
585{
586}
587static inline void nvme_mpath_wait_freeze(struct nvme_subsystem *subsys)
588{
589}
590static inline void nvme_mpath_start_freeze(struct nvme_subsystem *subsys)
591{
592}
567#endif /* CONFIG_NVME_MULTIPATH */ 593#endif /* CONFIG_NVME_MULTIPATH */
568 594
569#ifdef CONFIG_NVM 595#ifdef CONFIG_NVM
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index bb970ca82517..732d5b63ec05 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -2254,9 +2254,7 @@ static int nvme_dev_add(struct nvme_dev *dev)
2254 if (!dev->ctrl.tagset) { 2254 if (!dev->ctrl.tagset) {
2255 dev->tagset.ops = &nvme_mq_ops; 2255 dev->tagset.ops = &nvme_mq_ops;
2256 dev->tagset.nr_hw_queues = dev->online_queues - 1; 2256 dev->tagset.nr_hw_queues = dev->online_queues - 1;
2257 dev->tagset.nr_maps = 1; /* default */ 2257 dev->tagset.nr_maps = 2; /* default + read */
2258 if (dev->io_queues[HCTX_TYPE_READ])
2259 dev->tagset.nr_maps++;
2260 if (dev->io_queues[HCTX_TYPE_POLL]) 2258 if (dev->io_queues[HCTX_TYPE_POLL])
2261 dev->tagset.nr_maps++; 2259 dev->tagset.nr_maps++;
2262 dev->tagset.timeout = NVME_IO_TIMEOUT; 2260 dev->tagset.timeout = NVME_IO_TIMEOUT;
@@ -2697,7 +2695,7 @@ static void nvme_async_probe(void *data, async_cookie_t cookie)
2697{ 2695{
2698 struct nvme_dev *dev = data; 2696 struct nvme_dev *dev = data;
2699 2697
2700 nvme_reset_ctrl_sync(&dev->ctrl); 2698 flush_work(&dev->ctrl.reset_work);
2701 flush_work(&dev->ctrl.scan_work); 2699 flush_work(&dev->ctrl.scan_work);
2702 nvme_put_ctrl(&dev->ctrl); 2700 nvme_put_ctrl(&dev->ctrl);
2703} 2701}
@@ -2763,6 +2761,7 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
2763 2761
2764 dev_info(dev->ctrl.device, "pci function %s\n", dev_name(&pdev->dev)); 2762 dev_info(dev->ctrl.device, "pci function %s\n", dev_name(&pdev->dev));
2765 2763
2764 nvme_reset_ctrl(&dev->ctrl);
2766 nvme_get_ctrl(&dev->ctrl); 2765 nvme_get_ctrl(&dev->ctrl);
2767 async_schedule(nvme_async_probe, dev); 2766 async_schedule(nvme_async_probe, dev);
2768 2767
@@ -2848,7 +2847,7 @@ static int nvme_resume(struct device *dev)
2848 struct nvme_dev *ndev = pci_get_drvdata(to_pci_dev(dev)); 2847 struct nvme_dev *ndev = pci_get_drvdata(to_pci_dev(dev));
2849 struct nvme_ctrl *ctrl = &ndev->ctrl; 2848 struct nvme_ctrl *ctrl = &ndev->ctrl;
2850 2849
2851 if (pm_resume_via_firmware() || !ctrl->npss || 2850 if (ndev->last_ps == U32_MAX ||
2852 nvme_set_power_state(ctrl, ndev->last_ps) != 0) 2851 nvme_set_power_state(ctrl, ndev->last_ps) != 0)
2853 nvme_reset_ctrl(ctrl); 2852 nvme_reset_ctrl(ctrl);
2854 return 0; 2853 return 0;
@@ -2861,6 +2860,8 @@ static int nvme_suspend(struct device *dev)
2861 struct nvme_ctrl *ctrl = &ndev->ctrl; 2860 struct nvme_ctrl *ctrl = &ndev->ctrl;
2862 int ret = -EBUSY; 2861 int ret = -EBUSY;
2863 2862
2863 ndev->last_ps = U32_MAX;
2864
2864 /* 2865 /*
2865 * The platform does not remove power for a kernel managed suspend so 2866 * The platform does not remove power for a kernel managed suspend so
2866 * use host managed nvme power settings for lowest idle power if 2867 * use host managed nvme power settings for lowest idle power if
@@ -2868,8 +2869,15 @@ static int nvme_suspend(struct device *dev)
2868 * shutdown. But if the firmware is involved after the suspend or the 2869 * shutdown. But if the firmware is involved after the suspend or the
2869 * device does not support any non-default power states, shut down the 2870 * device does not support any non-default power states, shut down the
2870 * device fully. 2871 * device fully.
2872 *
2873 * If ASPM is not enabled for the device, shut down the device and allow
2874 * the PCI bus layer to put it into D3 in order to take the PCIe link
2875 * down, so as to allow the platform to achieve its minimum low-power
2876 * state (which may not be possible if the link is up).
2871 */ 2877 */
2872 if (pm_suspend_via_firmware() || !ctrl->npss) { 2878 if (pm_suspend_via_firmware() || !ctrl->npss ||
2879 !pcie_aspm_enabled(pdev) ||
2880 (ndev->ctrl.quirks & NVME_QUIRK_SIMPLE_SUSPEND)) {
2873 nvme_dev_disable(ndev, true); 2881 nvme_dev_disable(ndev, true);
2874 return 0; 2882 return 0;
2875 } 2883 }
@@ -2882,7 +2890,6 @@ static int nvme_suspend(struct device *dev)
2882 ctrl->state != NVME_CTRL_ADMIN_ONLY) 2890 ctrl->state != NVME_CTRL_ADMIN_ONLY)
2883 goto unfreeze; 2891 goto unfreeze;
2884 2892
2885 ndev->last_ps = 0;
2886 ret = nvme_get_power_state(ctrl, &ndev->last_ps); 2893 ret = nvme_get_power_state(ctrl, &ndev->last_ps);
2887 if (ret < 0) 2894 if (ret < 0)
2888 goto unfreeze; 2895 goto unfreeze;
@@ -3029,6 +3036,8 @@ static const struct pci_device_id nvme_id_table[] = {
3029 .driver_data = NVME_QUIRK_LIGHTNVM, }, 3036 .driver_data = NVME_QUIRK_LIGHTNVM, },
3030 { PCI_DEVICE(0x1d1d, 0x2601), /* CNEX Granby */ 3037 { PCI_DEVICE(0x1d1d, 0x2601), /* CNEX Granby */
3031 .driver_data = NVME_QUIRK_LIGHTNVM, }, 3038 .driver_data = NVME_QUIRK_LIGHTNVM, },
3039 { PCI_DEVICE(0x10ec, 0x5762), /* ADATA SX6000LNP */
3040 .driver_data = NVME_QUIRK_IGNORE_DEV_SUBNQN, },
3032 { PCI_DEVICE_CLASS(PCI_CLASS_STORAGE_EXPRESS, 0xffffff) }, 3041 { PCI_DEVICE_CLASS(PCI_CLASS_STORAGE_EXPRESS, 0xffffff) },
3033 { PCI_DEVICE(PCI_VENDOR_ID_APPLE, 0x2001) }, 3042 { PCI_DEVICE(PCI_VENDOR_ID_APPLE, 0x2001) },
3034 { PCI_DEVICE(PCI_VENDOR_ID_APPLE, 0x2003) }, 3043 { PCI_DEVICE(PCI_VENDOR_ID_APPLE, 0x2003) },
diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
index a249db528d54..1a6449bc547b 100644
--- a/drivers/nvme/host/rdma.c
+++ b/drivers/nvme/host/rdma.c
@@ -562,13 +562,17 @@ out_destroy_cm_id:
562 return ret; 562 return ret;
563} 563}
564 564
565static void __nvme_rdma_stop_queue(struct nvme_rdma_queue *queue)
566{
567 rdma_disconnect(queue->cm_id);
568 ib_drain_qp(queue->qp);
569}
570
565static void nvme_rdma_stop_queue(struct nvme_rdma_queue *queue) 571static void nvme_rdma_stop_queue(struct nvme_rdma_queue *queue)
566{ 572{
567 if (!test_and_clear_bit(NVME_RDMA_Q_LIVE, &queue->flags)) 573 if (!test_and_clear_bit(NVME_RDMA_Q_LIVE, &queue->flags))
568 return; 574 return;
569 575 __nvme_rdma_stop_queue(queue);
570 rdma_disconnect(queue->cm_id);
571 ib_drain_qp(queue->qp);
572} 576}
573 577
574static void nvme_rdma_free_queue(struct nvme_rdma_queue *queue) 578static void nvme_rdma_free_queue(struct nvme_rdma_queue *queue)
@@ -607,11 +611,13 @@ static int nvme_rdma_start_queue(struct nvme_rdma_ctrl *ctrl, int idx)
607 else 611 else
608 ret = nvmf_connect_admin_queue(&ctrl->ctrl); 612 ret = nvmf_connect_admin_queue(&ctrl->ctrl);
609 613
610 if (!ret) 614 if (!ret) {
611 set_bit(NVME_RDMA_Q_LIVE, &queue->flags); 615 set_bit(NVME_RDMA_Q_LIVE, &queue->flags);
612 else 616 } else {
617 __nvme_rdma_stop_queue(queue);
613 dev_info(ctrl->ctrl.device, 618 dev_info(ctrl->ctrl.device,
614 "failed to connect queue: %d ret=%d\n", idx, ret); 619 "failed to connect queue: %d ret=%d\n", idx, ret);
620 }
615 return ret; 621 return ret;
616} 622}
617 623
diff --git a/drivers/nvme/target/configfs.c b/drivers/nvme/target/configfs.c
index cd52b9f15376..98613a45bd3b 100644
--- a/drivers/nvme/target/configfs.c
+++ b/drivers/nvme/target/configfs.c
@@ -675,6 +675,7 @@ static void nvmet_port_subsys_drop_link(struct config_item *parent,
675 675
676found: 676found:
677 list_del(&p->entry); 677 list_del(&p->entry);
678 nvmet_port_del_ctrls(port, subsys);
678 nvmet_port_disc_changed(port, subsys); 679 nvmet_port_disc_changed(port, subsys);
679 680
680 if (list_empty(&port->subsystems)) 681 if (list_empty(&port->subsystems))
diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c
index dad0243c7c96..3a67e244e568 100644
--- a/drivers/nvme/target/core.c
+++ b/drivers/nvme/target/core.c
@@ -46,6 +46,9 @@ inline u16 errno_to_nvme_status(struct nvmet_req *req, int errno)
46 u16 status; 46 u16 status;
47 47
48 switch (errno) { 48 switch (errno) {
49 case 0:
50 status = NVME_SC_SUCCESS;
51 break;
49 case -ENOSPC: 52 case -ENOSPC:
50 req->error_loc = offsetof(struct nvme_rw_command, length); 53 req->error_loc = offsetof(struct nvme_rw_command, length);
51 status = NVME_SC_CAP_EXCEEDED | NVME_SC_DNR; 54 status = NVME_SC_CAP_EXCEEDED | NVME_SC_DNR;
@@ -280,6 +283,18 @@ void nvmet_unregister_transport(const struct nvmet_fabrics_ops *ops)
280} 283}
281EXPORT_SYMBOL_GPL(nvmet_unregister_transport); 284EXPORT_SYMBOL_GPL(nvmet_unregister_transport);
282 285
286void nvmet_port_del_ctrls(struct nvmet_port *port, struct nvmet_subsys *subsys)
287{
288 struct nvmet_ctrl *ctrl;
289
290 mutex_lock(&subsys->lock);
291 list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry) {
292 if (ctrl->port == port)
293 ctrl->ops->delete_ctrl(ctrl);
294 }
295 mutex_unlock(&subsys->lock);
296}
297
283int nvmet_enable_port(struct nvmet_port *port) 298int nvmet_enable_port(struct nvmet_port *port)
284{ 299{
285 const struct nvmet_fabrics_ops *ops; 300 const struct nvmet_fabrics_ops *ops;
diff --git a/drivers/nvme/target/loop.c b/drivers/nvme/target/loop.c
index b16dc3981c69..0940c5024a34 100644
--- a/drivers/nvme/target/loop.c
+++ b/drivers/nvme/target/loop.c
@@ -654,6 +654,14 @@ static void nvme_loop_remove_port(struct nvmet_port *port)
654 mutex_lock(&nvme_loop_ports_mutex); 654 mutex_lock(&nvme_loop_ports_mutex);
655 list_del_init(&port->entry); 655 list_del_init(&port->entry);
656 mutex_unlock(&nvme_loop_ports_mutex); 656 mutex_unlock(&nvme_loop_ports_mutex);
657
658 /*
659 * Ensure any ctrls that are in the process of being
660 * deleted are in fact deleted before we return
661 * and free the port. This is to prevent active
662 * ctrls from using a port after it's freed.
663 */
664 flush_workqueue(nvme_delete_wq);
657} 665}
658 666
659static const struct nvmet_fabrics_ops nvme_loop_ops = { 667static const struct nvmet_fabrics_ops nvme_loop_ops = {
diff --git a/drivers/nvme/target/nvmet.h b/drivers/nvme/target/nvmet.h
index 6ee66c610739..c51f8dd01dc4 100644
--- a/drivers/nvme/target/nvmet.h
+++ b/drivers/nvme/target/nvmet.h
@@ -418,6 +418,9 @@ void nvmet_port_send_ana_event(struct nvmet_port *port);
418int nvmet_register_transport(const struct nvmet_fabrics_ops *ops); 418int nvmet_register_transport(const struct nvmet_fabrics_ops *ops);
419void nvmet_unregister_transport(const struct nvmet_fabrics_ops *ops); 419void nvmet_unregister_transport(const struct nvmet_fabrics_ops *ops);
420 420
421void nvmet_port_del_ctrls(struct nvmet_port *port,
422 struct nvmet_subsys *subsys);
423
421int nvmet_enable_port(struct nvmet_port *port); 424int nvmet_enable_port(struct nvmet_port *port);
422void nvmet_disable_port(struct nvmet_port *port); 425void nvmet_disable_port(struct nvmet_port *port);
423 426
diff --git a/drivers/nvmem/nvmem-sysfs.c b/drivers/nvmem/nvmem-sysfs.c
index 6f303b91f6e7..9e0c429cd08a 100644
--- a/drivers/nvmem/nvmem-sysfs.c
+++ b/drivers/nvmem/nvmem-sysfs.c
@@ -224,10 +224,17 @@ int nvmem_sysfs_setup_compat(struct nvmem_device *nvmem,
224 if (!config->base_dev) 224 if (!config->base_dev)
225 return -EINVAL; 225 return -EINVAL;
226 226
227 if (nvmem->read_only) 227 if (nvmem->read_only) {
228 nvmem->eeprom = bin_attr_ro_root_nvmem; 228 if (config->root_only)
229 else 229 nvmem->eeprom = bin_attr_ro_root_nvmem;
230 nvmem->eeprom = bin_attr_rw_root_nvmem; 230 else
231 nvmem->eeprom = bin_attr_ro_nvmem;
232 } else {
233 if (config->root_only)
234 nvmem->eeprom = bin_attr_rw_root_nvmem;
235 else
236 nvmem->eeprom = bin_attr_rw_nvmem;
237 }
231 nvmem->eeprom.attr.name = "eeprom"; 238 nvmem->eeprom.attr.name = "eeprom";
232 nvmem->eeprom.size = nvmem->size; 239 nvmem->eeprom.size = nvmem->size;
233#ifdef CONFIG_DEBUG_LOCK_ALLOC 240#ifdef CONFIG_DEBUG_LOCK_ALLOC
diff --git a/drivers/of/irq.c b/drivers/of/irq.c
index 7f84bb4903ca..a296eaf52a5b 100644
--- a/drivers/of/irq.c
+++ b/drivers/of/irq.c
@@ -277,7 +277,7 @@ EXPORT_SYMBOL_GPL(of_irq_parse_raw);
277 * of_irq_parse_one - Resolve an interrupt for a device 277 * of_irq_parse_one - Resolve an interrupt for a device
278 * @device: the device whose interrupt is to be resolved 278 * @device: the device whose interrupt is to be resolved
279 * @index: index of the interrupt to resolve 279 * @index: index of the interrupt to resolve
280 * @out_irq: structure of_irq filled by this function 280 * @out_irq: structure of_phandle_args filled by this function
281 * 281 *
282 * This function resolves an interrupt for a node by walking the interrupt tree, 282 * This function resolves an interrupt for a node by walking the interrupt tree,
283 * finding which interrupt controller node it is attached to, and returning the 283 * finding which interrupt controller node it is attached to, and returning the
diff --git a/drivers/of/resolver.c b/drivers/of/resolver.c
index c1b67dd7cd6e..83c766233181 100644
--- a/drivers/of/resolver.c
+++ b/drivers/of/resolver.c
@@ -206,16 +206,22 @@ static int adjust_local_phandle_references(struct device_node *local_fixups,
206 for_each_child_of_node(local_fixups, child) { 206 for_each_child_of_node(local_fixups, child) {
207 207
208 for_each_child_of_node(overlay, overlay_child) 208 for_each_child_of_node(overlay, overlay_child)
209 if (!node_name_cmp(child, overlay_child)) 209 if (!node_name_cmp(child, overlay_child)) {
210 of_node_put(overlay_child);
210 break; 211 break;
212 }
211 213
212 if (!overlay_child) 214 if (!overlay_child) {
215 of_node_put(child);
213 return -EINVAL; 216 return -EINVAL;
217 }
214 218
215 err = adjust_local_phandle_references(child, overlay_child, 219 err = adjust_local_phandle_references(child, overlay_child,
216 phandle_delta); 220 phandle_delta);
217 if (err) 221 if (err) {
222 of_node_put(child);
218 return err; 223 return err;
224 }
219 } 225 }
220 226
221 return 0; 227 return 0;
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index 29ed5ec1ac27..1b27b5af3d55 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -1025,10 +1025,15 @@ static void __pci_start_power_transition(struct pci_dev *dev, pci_power_t state)
1025 if (state == PCI_D0) { 1025 if (state == PCI_D0) {
1026 pci_platform_power_transition(dev, PCI_D0); 1026 pci_platform_power_transition(dev, PCI_D0);
1027 /* 1027 /*
1028 * Mandatory power management transition delays are 1028 * Mandatory power management transition delays, see
1029 * handled in the PCIe portdrv resume hooks. 1029 * PCI Express Base Specification Revision 2.0 Section
1030 * 6.6.1: Conventional Reset. Do not delay for
1031 * devices powered on/off by corresponding bridge,
1032 * because have already delayed for the bridge.
1030 */ 1033 */
1031 if (dev->runtime_d3cold) { 1034 if (dev->runtime_d3cold) {
1035 if (dev->d3cold_delay && !dev->imm_ready)
1036 msleep(dev->d3cold_delay);
1032 /* 1037 /*
1033 * When powering on a bridge from D3cold, the 1038 * When powering on a bridge from D3cold, the
1034 * whole hierarchy may be powered on into 1039 * whole hierarchy may be powered on into
@@ -4602,16 +4607,14 @@ static int pci_pm_reset(struct pci_dev *dev, int probe)
4602 4607
4603 return pci_dev_wait(dev, "PM D3->D0", PCIE_RESET_READY_POLL_MS); 4608 return pci_dev_wait(dev, "PM D3->D0", PCIE_RESET_READY_POLL_MS);
4604} 4609}
4605
4606/** 4610/**
4607 * pcie_wait_for_link_delay - Wait until link is active or inactive 4611 * pcie_wait_for_link - Wait until link is active or inactive
4608 * @pdev: Bridge device 4612 * @pdev: Bridge device
4609 * @active: waiting for active or inactive? 4613 * @active: waiting for active or inactive?
4610 * @delay: Delay to wait after link has become active (in ms)
4611 * 4614 *
4612 * Use this to wait till link becomes active or inactive. 4615 * Use this to wait till link becomes active or inactive.
4613 */ 4616 */
4614bool pcie_wait_for_link_delay(struct pci_dev *pdev, bool active, int delay) 4617bool pcie_wait_for_link(struct pci_dev *pdev, bool active)
4615{ 4618{
4616 int timeout = 1000; 4619 int timeout = 1000;
4617 bool ret; 4620 bool ret;
@@ -4648,25 +4651,13 @@ bool pcie_wait_for_link_delay(struct pci_dev *pdev, bool active, int delay)
4648 timeout -= 10; 4651 timeout -= 10;
4649 } 4652 }
4650 if (active && ret) 4653 if (active && ret)
4651 msleep(delay); 4654 msleep(100);
4652 else if (ret != active) 4655 else if (ret != active)
4653 pci_info(pdev, "Data Link Layer Link Active not %s in 1000 msec\n", 4656 pci_info(pdev, "Data Link Layer Link Active not %s in 1000 msec\n",
4654 active ? "set" : "cleared"); 4657 active ? "set" : "cleared");
4655 return ret == active; 4658 return ret == active;
4656} 4659}
4657 4660
4658/**
4659 * pcie_wait_for_link - Wait until link is active or inactive
4660 * @pdev: Bridge device
4661 * @active: waiting for active or inactive?
4662 *
4663 * Use this to wait till link becomes active or inactive.
4664 */
4665bool pcie_wait_for_link(struct pci_dev *pdev, bool active)
4666{
4667 return pcie_wait_for_link_delay(pdev, active, 100);
4668}
4669
4670void pci_reset_secondary_bus(struct pci_dev *dev) 4661void pci_reset_secondary_bus(struct pci_dev *dev)
4671{ 4662{
4672 u16 ctrl; 4663 u16 ctrl;
diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h
index 1be03a97cb92..d22d1b807701 100644
--- a/drivers/pci/pci.h
+++ b/drivers/pci/pci.h
@@ -497,7 +497,6 @@ static inline int pci_dev_specific_disable_acs_redir(struct pci_dev *dev)
497void pcie_do_recovery(struct pci_dev *dev, enum pci_channel_state state, 497void pcie_do_recovery(struct pci_dev *dev, enum pci_channel_state state,
498 u32 service); 498 u32 service);
499 499
500bool pcie_wait_for_link_delay(struct pci_dev *pdev, bool active, int delay);
501bool pcie_wait_for_link(struct pci_dev *pdev, bool active); 500bool pcie_wait_for_link(struct pci_dev *pdev, bool active);
502#ifdef CONFIG_PCIEASPM 501#ifdef CONFIG_PCIEASPM
503void pcie_aspm_init_link_state(struct pci_dev *pdev); 502void pcie_aspm_init_link_state(struct pci_dev *pdev);
diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c
index e44af7f4d37f..464f8f92653f 100644
--- a/drivers/pci/pcie/aspm.c
+++ b/drivers/pci/pcie/aspm.c
@@ -1170,6 +1170,26 @@ static int pcie_aspm_get_policy(char *buffer, const struct kernel_param *kp)
1170module_param_call(policy, pcie_aspm_set_policy, pcie_aspm_get_policy, 1170module_param_call(policy, pcie_aspm_set_policy, pcie_aspm_get_policy,
1171 NULL, 0644); 1171 NULL, 0644);
1172 1172
1173/**
1174 * pcie_aspm_enabled - Check if PCIe ASPM has been enabled for a device.
1175 * @pdev: Target device.
1176 */
1177bool pcie_aspm_enabled(struct pci_dev *pdev)
1178{
1179 struct pci_dev *bridge = pci_upstream_bridge(pdev);
1180 bool ret;
1181
1182 if (!bridge)
1183 return false;
1184
1185 mutex_lock(&aspm_lock);
1186 ret = bridge->link_state ? !!bridge->link_state->aspm_enabled : false;
1187 mutex_unlock(&aspm_lock);
1188
1189 return ret;
1190}
1191EXPORT_SYMBOL_GPL(pcie_aspm_enabled);
1192
1173#ifdef CONFIG_PCIEASPM_DEBUG 1193#ifdef CONFIG_PCIEASPM_DEBUG
1174static ssize_t link_state_show(struct device *dev, 1194static ssize_t link_state_show(struct device *dev,
1175 struct device_attribute *attr, 1195 struct device_attribute *attr,
diff --git a/drivers/pci/pcie/portdrv_core.c b/drivers/pci/pcie/portdrv_core.c
index 308c3e0c4a34..1b330129089f 100644
--- a/drivers/pci/pcie/portdrv_core.c
+++ b/drivers/pci/pcie/portdrv_core.c
@@ -9,7 +9,6 @@
9#include <linux/module.h> 9#include <linux/module.h>
10#include <linux/pci.h> 10#include <linux/pci.h>
11#include <linux/kernel.h> 11#include <linux/kernel.h>
12#include <linux/delay.h>
13#include <linux/errno.h> 12#include <linux/errno.h>
14#include <linux/pm.h> 13#include <linux/pm.h>
15#include <linux/pm_runtime.h> 14#include <linux/pm_runtime.h>
@@ -379,67 +378,6 @@ static int pm_iter(struct device *dev, void *data)
379 return 0; 378 return 0;
380} 379}
381 380
382static int get_downstream_delay(struct pci_bus *bus)
383{
384 struct pci_dev *pdev;
385 int min_delay = 100;
386 int max_delay = 0;
387
388 list_for_each_entry(pdev, &bus->devices, bus_list) {
389 if (!pdev->imm_ready)
390 min_delay = 0;
391 else if (pdev->d3cold_delay < min_delay)
392 min_delay = pdev->d3cold_delay;
393 if (pdev->d3cold_delay > max_delay)
394 max_delay = pdev->d3cold_delay;
395 }
396
397 return max(min_delay, max_delay);
398}
399
400/*
401 * wait_for_downstream_link - Wait for downstream link to establish
402 * @pdev: PCIe port whose downstream link is waited
403 *
404 * Handle delays according to PCIe 4.0 section 6.6.1 before configuration
405 * access to the downstream component is permitted.
406 *
407 * This blocks PCI core resume of the hierarchy below this port until the
408 * link is trained. Should be called before resuming port services to
409 * prevent pciehp from starting to tear-down the hierarchy too soon.
410 */
411static void wait_for_downstream_link(struct pci_dev *pdev)
412{
413 int delay;
414
415 if (pci_pcie_type(pdev) != PCI_EXP_TYPE_ROOT_PORT &&
416 pci_pcie_type(pdev) != PCI_EXP_TYPE_DOWNSTREAM)
417 return;
418
419 if (pci_dev_is_disconnected(pdev))
420 return;
421
422 if (!pdev->subordinate || list_empty(&pdev->subordinate->devices) ||
423 !pdev->bridge_d3)
424 return;
425
426 delay = get_downstream_delay(pdev->subordinate);
427 if (!delay)
428 return;
429
430 dev_dbg(&pdev->dev, "waiting downstream link for %d ms\n", delay);
431
432 /*
433 * If downstream port does not support speeds greater than 5 GT/s
434 * need to wait 100ms. For higher speeds (gen3) we need to wait
435 * first for the data link layer to become active.
436 */
437 if (pcie_get_speed_cap(pdev) <= PCIE_SPEED_5_0GT)
438 msleep(delay);
439 else
440 pcie_wait_for_link_delay(pdev, true, delay);
441}
442
443/** 381/**
444 * pcie_port_device_suspend - suspend port services associated with a PCIe port 382 * pcie_port_device_suspend - suspend port services associated with a PCIe port
445 * @dev: PCI Express port to handle 383 * @dev: PCI Express port to handle
@@ -453,8 +391,6 @@ int pcie_port_device_suspend(struct device *dev)
453int pcie_port_device_resume_noirq(struct device *dev) 391int pcie_port_device_resume_noirq(struct device *dev)
454{ 392{
455 size_t off = offsetof(struct pcie_port_service_driver, resume_noirq); 393 size_t off = offsetof(struct pcie_port_service_driver, resume_noirq);
456
457 wait_for_downstream_link(to_pci_dev(dev));
458 return device_for_each_child(dev, &off, pm_iter); 394 return device_for_each_child(dev, &off, pm_iter);
459} 395}
460 396
@@ -485,8 +421,6 @@ int pcie_port_device_runtime_suspend(struct device *dev)
485int pcie_port_device_runtime_resume(struct device *dev) 421int pcie_port_device_runtime_resume(struct device *dev)
486{ 422{
487 size_t off = offsetof(struct pcie_port_service_driver, runtime_resume); 423 size_t off = offsetof(struct pcie_port_service_driver, runtime_resume);
488
489 wait_for_downstream_link(to_pci_dev(dev));
490 return device_for_each_child(dev, &off, pm_iter); 424 return device_for_each_child(dev, &off, pm_iter);
491} 425}
492#endif /* PM */ 426#endif /* PM */
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
index 208aacf39329..44c4ae1abd00 100644
--- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c
@@ -5256,7 +5256,7 @@ static void quirk_reset_lenovo_thinkpad_p50_nvgpu(struct pci_dev *pdev)
5256 */ 5256 */
5257 if (ioread32(map + 0x2240c) & 0x2) { 5257 if (ioread32(map + 0x2240c) & 0x2) {
5258 pci_info(pdev, FW_BUG "GPU left initialized by EFI, resetting\n"); 5258 pci_info(pdev, FW_BUG "GPU left initialized by EFI, resetting\n");
5259 ret = pci_reset_function(pdev); 5259 ret = pci_reset_bus(pdev);
5260 if (ret < 0) 5260 if (ret < 0)
5261 pci_err(pdev, "Failed to reset GPU: %d\n", ret); 5261 pci_err(pdev, "Failed to reset GPU: %d\n", ret);
5262 } 5262 }
diff --git a/drivers/pcmcia/db1xxx_ss.c b/drivers/pcmcia/db1xxx_ss.c
index eb6168e6ac43..590e594092f2 100644
--- a/drivers/pcmcia/db1xxx_ss.c
+++ b/drivers/pcmcia/db1xxx_ss.c
@@ -255,8 +255,10 @@ static int db1x_pcmcia_configure(struct pcmcia_socket *skt,
255 switch (state->Vcc) { 255 switch (state->Vcc) {
256 case 50: 256 case 50:
257 ++v; 257 ++v;
258 /* fall through */
258 case 33: 259 case 33:
259 ++v; 260 ++v;
261 /* fall through */
260 case 0: 262 case 0:
261 break; 263 break;
262 default: 264 default:
@@ -267,9 +269,11 @@ static int db1x_pcmcia_configure(struct pcmcia_socket *skt,
267 switch (state->Vpp) { 269 switch (state->Vpp) {
268 case 12: 270 case 12:
269 ++p; 271 ++p;
272 /* fall through */
270 case 33: 273 case 33:
271 case 50: 274 case 50:
272 ++p; 275 ++p;
276 /* fall through */
273 case 0: 277 case 0:
274 break; 278 break;
275 default: 279 default:
diff --git a/drivers/perf/arm_pmu.c b/drivers/perf/arm_pmu.c
index 2d06b8095a19..df352b334ea7 100644
--- a/drivers/perf/arm_pmu.c
+++ b/drivers/perf/arm_pmu.c
@@ -723,8 +723,8 @@ static int cpu_pm_pmu_notify(struct notifier_block *b, unsigned long cmd,
723 cpu_pm_pmu_setup(armpmu, cmd); 723 cpu_pm_pmu_setup(armpmu, cmd);
724 break; 724 break;
725 case CPU_PM_EXIT: 725 case CPU_PM_EXIT:
726 cpu_pm_pmu_setup(armpmu, cmd);
727 case CPU_PM_ENTER_FAILED: 726 case CPU_PM_ENTER_FAILED:
727 cpu_pm_pmu_setup(armpmu, cmd);
728 armpmu->start(armpmu); 728 armpmu->start(armpmu);
729 break; 729 break;
730 default: 730 default:
diff --git a/drivers/pinctrl/aspeed/pinctrl-aspeed-g4.c b/drivers/pinctrl/aspeed/pinctrl-aspeed-g4.c
index 384396cbb22d..22256576b69a 100644
--- a/drivers/pinctrl/aspeed/pinctrl-aspeed-g4.c
+++ b/drivers/pinctrl/aspeed/pinctrl-aspeed-g4.c
@@ -2412,7 +2412,7 @@ static const struct aspeed_pin_config aspeed_g4_configs[] = {
2412 { PIN_CONFIG_INPUT_DEBOUNCE, { C14, B14 }, SCUA8, 27 }, 2412 { PIN_CONFIG_INPUT_DEBOUNCE, { C14, B14 }, SCUA8, 27 },
2413}; 2413};
2414 2414
2415static int aspeed_g4_sig_expr_set(const struct aspeed_pinmux_data *ctx, 2415static int aspeed_g4_sig_expr_set(struct aspeed_pinmux_data *ctx,
2416 const struct aspeed_sig_expr *expr, 2416 const struct aspeed_sig_expr *expr,
2417 bool enable) 2417 bool enable)
2418{ 2418{
diff --git a/drivers/pinctrl/aspeed/pinctrl-aspeed-g5.c b/drivers/pinctrl/aspeed/pinctrl-aspeed-g5.c
index 053101f795a2..ba6438ac4d72 100644
--- a/drivers/pinctrl/aspeed/pinctrl-aspeed-g5.c
+++ b/drivers/pinctrl/aspeed/pinctrl-aspeed-g5.c
@@ -2507,6 +2507,61 @@ static struct aspeed_pin_config aspeed_g5_configs[] = {
2507 { PIN_CONFIG_INPUT_DEBOUNCE, { A20, B19 }, SCUA8, 27 }, 2507 { PIN_CONFIG_INPUT_DEBOUNCE, { A20, B19 }, SCUA8, 27 },
2508}; 2508};
2509 2509
2510static struct regmap *aspeed_g5_acquire_regmap(struct aspeed_pinmux_data *ctx,
2511 int ip)
2512{
2513 if (ip == ASPEED_IP_SCU) {
2514 WARN(!ctx->maps[ip], "Missing SCU syscon!");
2515 return ctx->maps[ip];
2516 }
2517
2518 if (ip >= ASPEED_NR_PINMUX_IPS)
2519 return ERR_PTR(-EINVAL);
2520
2521 if (likely(ctx->maps[ip]))
2522 return ctx->maps[ip];
2523
2524 if (ip == ASPEED_IP_GFX) {
2525 struct device_node *node;
2526 struct regmap *map;
2527
2528 node = of_parse_phandle(ctx->dev->of_node,
2529 "aspeed,external-nodes", 0);
2530 if (node) {
2531 map = syscon_node_to_regmap(node);
2532 of_node_put(node);
2533 if (IS_ERR(map))
2534 return map;
2535 } else
2536 return ERR_PTR(-ENODEV);
2537
2538 ctx->maps[ASPEED_IP_GFX] = map;
2539 dev_dbg(ctx->dev, "Acquired GFX regmap");
2540 return map;
2541 }
2542
2543 if (ip == ASPEED_IP_LPC) {
2544 struct device_node *node;
2545 struct regmap *map;
2546
2547 node = of_parse_phandle(ctx->dev->of_node,
2548 "aspeed,external-nodes", 1);
2549 if (node) {
2550 map = syscon_node_to_regmap(node->parent);
2551 of_node_put(node);
2552 if (IS_ERR(map))
2553 return map;
2554 } else
2555 map = ERR_PTR(-ENODEV);
2556
2557 ctx->maps[ASPEED_IP_LPC] = map;
2558 dev_dbg(ctx->dev, "Acquired LPC regmap");
2559 return map;
2560 }
2561
2562 return ERR_PTR(-EINVAL);
2563}
2564
2510/** 2565/**
2511 * Configure a pin's signal by applying an expression's descriptor state for 2566 * Configure a pin's signal by applying an expression's descriptor state for
2512 * all descriptors in the expression. 2567 * all descriptors in the expression.
@@ -2520,7 +2575,7 @@ static struct aspeed_pin_config aspeed_g5_configs[] = {
2520 * Return: 0 if the expression is configured as requested and a negative error 2575 * Return: 0 if the expression is configured as requested and a negative error
2521 * code otherwise 2576 * code otherwise
2522 */ 2577 */
2523static int aspeed_g5_sig_expr_set(const struct aspeed_pinmux_data *ctx, 2578static int aspeed_g5_sig_expr_set(struct aspeed_pinmux_data *ctx,
2524 const struct aspeed_sig_expr *expr, 2579 const struct aspeed_sig_expr *expr,
2525 bool enable) 2580 bool enable)
2526{ 2581{
@@ -2531,9 +2586,15 @@ static int aspeed_g5_sig_expr_set(const struct aspeed_pinmux_data *ctx,
2531 const struct aspeed_sig_desc *desc = &expr->descs[i]; 2586 const struct aspeed_sig_desc *desc = &expr->descs[i];
2532 u32 pattern = enable ? desc->enable : desc->disable; 2587 u32 pattern = enable ? desc->enable : desc->disable;
2533 u32 val = (pattern << __ffs(desc->mask)); 2588 u32 val = (pattern << __ffs(desc->mask));
2589 struct regmap *map;
2534 2590
2535 if (!ctx->maps[desc->ip]) 2591 map = aspeed_g5_acquire_regmap(ctx, desc->ip);
2536 return -ENODEV; 2592 if (IS_ERR(map)) {
2593 dev_err(ctx->dev,
2594 "Failed to acquire regmap for IP block %d\n",
2595 desc->ip);
2596 return PTR_ERR(map);
2597 }
2537 2598
2538 /* 2599 /*
2539 * Strap registers are configured in hardware or by early-boot 2600 * Strap registers are configured in hardware or by early-boot
@@ -2641,34 +2702,11 @@ static struct pinctrl_desc aspeed_g5_pinctrl_desc = {
2641static int aspeed_g5_pinctrl_probe(struct platform_device *pdev) 2702static int aspeed_g5_pinctrl_probe(struct platform_device *pdev)
2642{ 2703{
2643 int i; 2704 int i;
2644 struct regmap *map;
2645 struct device_node *node;
2646 2705
2647 for (i = 0; i < ARRAY_SIZE(aspeed_g5_pins); i++) 2706 for (i = 0; i < ARRAY_SIZE(aspeed_g5_pins); i++)
2648 aspeed_g5_pins[i].number = i; 2707 aspeed_g5_pins[i].number = i;
2649 2708
2650 node = of_parse_phandle(pdev->dev.of_node, "aspeed,external-nodes", 0); 2709 aspeed_g5_pinctrl_data.pinmux.dev = &pdev->dev;
2651 map = syscon_node_to_regmap(node);
2652 of_node_put(node);
2653 if (IS_ERR(map)) {
2654 dev_warn(&pdev->dev, "No GFX phandle found, some mux configurations may fail\n");
2655 map = NULL;
2656 }
2657 aspeed_g5_pinctrl_data.pinmux.maps[ASPEED_IP_GFX] = map;
2658
2659 node = of_parse_phandle(pdev->dev.of_node, "aspeed,external-nodes", 1);
2660 if (node) {
2661 map = syscon_node_to_regmap(node->parent);
2662 if (IS_ERR(map)) {
2663 dev_warn(&pdev->dev, "LHC parent is not a syscon, some mux configurations may fail\n");
2664 map = NULL;
2665 }
2666 } else {
2667 dev_warn(&pdev->dev, "No LHC phandle found, some mux configurations may fail\n");
2668 map = NULL;
2669 }
2670 of_node_put(node);
2671 aspeed_g5_pinctrl_data.pinmux.maps[ASPEED_IP_LPC] = map;
2672 2710
2673 return aspeed_pinctrl_probe(pdev, &aspeed_g5_pinctrl_desc, 2711 return aspeed_pinctrl_probe(pdev, &aspeed_g5_pinctrl_desc,
2674 &aspeed_g5_pinctrl_data); 2712 &aspeed_g5_pinctrl_data);
diff --git a/drivers/pinctrl/aspeed/pinctrl-aspeed.c b/drivers/pinctrl/aspeed/pinctrl-aspeed.c
index 535db3de490b..54933665b5f8 100644
--- a/drivers/pinctrl/aspeed/pinctrl-aspeed.c
+++ b/drivers/pinctrl/aspeed/pinctrl-aspeed.c
@@ -71,7 +71,7 @@ int aspeed_pinmux_get_fn_groups(struct pinctrl_dev *pctldev,
71 return 0; 71 return 0;
72} 72}
73 73
74static int aspeed_sig_expr_enable(const struct aspeed_pinmux_data *ctx, 74static int aspeed_sig_expr_enable(struct aspeed_pinmux_data *ctx,
75 const struct aspeed_sig_expr *expr) 75 const struct aspeed_sig_expr *expr)
76{ 76{
77 int ret; 77 int ret;
@@ -86,7 +86,7 @@ static int aspeed_sig_expr_enable(const struct aspeed_pinmux_data *ctx,
86 return 0; 86 return 0;
87} 87}
88 88
89static int aspeed_sig_expr_disable(const struct aspeed_pinmux_data *ctx, 89static int aspeed_sig_expr_disable(struct aspeed_pinmux_data *ctx,
90 const struct aspeed_sig_expr *expr) 90 const struct aspeed_sig_expr *expr)
91{ 91{
92 int ret; 92 int ret;
@@ -109,7 +109,7 @@ static int aspeed_sig_expr_disable(const struct aspeed_pinmux_data *ctx,
109 * 109 *
110 * Return: 0 if all expressions are disabled, otherwise a negative error code 110 * Return: 0 if all expressions are disabled, otherwise a negative error code
111 */ 111 */
112static int aspeed_disable_sig(const struct aspeed_pinmux_data *ctx, 112static int aspeed_disable_sig(struct aspeed_pinmux_data *ctx,
113 const struct aspeed_sig_expr **exprs) 113 const struct aspeed_sig_expr **exprs)
114{ 114{
115 int ret = 0; 115 int ret = 0;
@@ -217,8 +217,7 @@ int aspeed_pinmux_set_mux(struct pinctrl_dev *pctldev, unsigned int function,
217{ 217{
218 int i; 218 int i;
219 int ret; 219 int ret;
220 const struct aspeed_pinctrl_data *pdata = 220 struct aspeed_pinctrl_data *pdata = pinctrl_dev_get_drvdata(pctldev);
221 pinctrl_dev_get_drvdata(pctldev);
222 const struct aspeed_pin_group *pgroup = &pdata->pinmux.groups[group]; 221 const struct aspeed_pin_group *pgroup = &pdata->pinmux.groups[group];
223 const struct aspeed_pin_function *pfunc = 222 const struct aspeed_pin_function *pfunc =
224 &pdata->pinmux.functions[function]; 223 &pdata->pinmux.functions[function];
@@ -306,8 +305,7 @@ int aspeed_gpio_request_enable(struct pinctrl_dev *pctldev,
306 unsigned int offset) 305 unsigned int offset)
307{ 306{
308 int ret; 307 int ret;
309 const struct aspeed_pinctrl_data *pdata = 308 struct aspeed_pinctrl_data *pdata = pinctrl_dev_get_drvdata(pctldev);
310 pinctrl_dev_get_drvdata(pctldev);
311 const struct aspeed_pin_desc *pdesc = pdata->pins[offset].drv_data; 309 const struct aspeed_pin_desc *pdesc = pdata->pins[offset].drv_data;
312 const struct aspeed_sig_expr ***prios, **funcs, *expr; 310 const struct aspeed_sig_expr ***prios, **funcs, *expr;
313 311
diff --git a/drivers/pinctrl/aspeed/pinmux-aspeed.c b/drivers/pinctrl/aspeed/pinmux-aspeed.c
index 5b0fe178ccf2..839c01b7953f 100644
--- a/drivers/pinctrl/aspeed/pinmux-aspeed.c
+++ b/drivers/pinctrl/aspeed/pinmux-aspeed.c
@@ -5,7 +5,7 @@
5 5
6#include "pinmux-aspeed.h" 6#include "pinmux-aspeed.h"
7 7
8const char *const aspeed_pinmux_ips[] = { 8static const char *const aspeed_pinmux_ips[] = {
9 [ASPEED_IP_SCU] = "SCU", 9 [ASPEED_IP_SCU] = "SCU",
10 [ASPEED_IP_GFX] = "GFX", 10 [ASPEED_IP_GFX] = "GFX",
11 [ASPEED_IP_LPC] = "LPC", 11 [ASPEED_IP_LPC] = "LPC",
diff --git a/drivers/pinctrl/aspeed/pinmux-aspeed.h b/drivers/pinctrl/aspeed/pinmux-aspeed.h
index 329d54d48667..52d299b59ce2 100644
--- a/drivers/pinctrl/aspeed/pinmux-aspeed.h
+++ b/drivers/pinctrl/aspeed/pinmux-aspeed.h
@@ -702,11 +702,12 @@ struct aspeed_pin_function {
702struct aspeed_pinmux_data; 702struct aspeed_pinmux_data;
703 703
704struct aspeed_pinmux_ops { 704struct aspeed_pinmux_ops {
705 int (*set)(const struct aspeed_pinmux_data *ctx, 705 int (*set)(struct aspeed_pinmux_data *ctx,
706 const struct aspeed_sig_expr *expr, bool enabled); 706 const struct aspeed_sig_expr *expr, bool enabled);
707}; 707};
708 708
709struct aspeed_pinmux_data { 709struct aspeed_pinmux_data {
710 struct device *dev;
710 struct regmap *maps[ASPEED_NR_PINMUX_IPS]; 711 struct regmap *maps[ASPEED_NR_PINMUX_IPS];
711 712
712 const struct aspeed_pinmux_ops *ops; 713 const struct aspeed_pinmux_ops *ops;
@@ -725,7 +726,7 @@ int aspeed_sig_expr_eval(const struct aspeed_pinmux_data *ctx,
725 const struct aspeed_sig_expr *expr, 726 const struct aspeed_sig_expr *expr,
726 bool enabled); 727 bool enabled);
727 728
728static inline int aspeed_sig_expr_set(const struct aspeed_pinmux_data *ctx, 729static inline int aspeed_sig_expr_set(struct aspeed_pinmux_data *ctx,
729 const struct aspeed_sig_expr *expr, 730 const struct aspeed_sig_expr *expr,
730 bool enabled) 731 bool enabled)
731{ 732{
diff --git a/drivers/platform/chrome/cros_ec_ishtp.c b/drivers/platform/chrome/cros_ec_ishtp.c
index e504d255d5ce..430731cdf827 100644
--- a/drivers/platform/chrome/cros_ec_ishtp.c
+++ b/drivers/platform/chrome/cros_ec_ishtp.c
@@ -707,7 +707,7 @@ static int cros_ec_ishtp_reset(struct ishtp_cl_device *cl_device)
707 */ 707 */
708static int __maybe_unused cros_ec_ishtp_suspend(struct device *device) 708static int __maybe_unused cros_ec_ishtp_suspend(struct device *device)
709{ 709{
710 struct ishtp_cl_device *cl_device = dev_get_drvdata(device); 710 struct ishtp_cl_device *cl_device = ishtp_dev_to_cl_device(device);
711 struct ishtp_cl *cros_ish_cl = ishtp_get_drvdata(cl_device); 711 struct ishtp_cl *cros_ish_cl = ishtp_get_drvdata(cl_device);
712 struct ishtp_cl_data *client_data = ishtp_get_client_data(cros_ish_cl); 712 struct ishtp_cl_data *client_data = ishtp_get_client_data(cros_ish_cl);
713 713
@@ -722,7 +722,7 @@ static int __maybe_unused cros_ec_ishtp_suspend(struct device *device)
722 */ 722 */
723static int __maybe_unused cros_ec_ishtp_resume(struct device *device) 723static int __maybe_unused cros_ec_ishtp_resume(struct device *device)
724{ 724{
725 struct ishtp_cl_device *cl_device = dev_get_drvdata(device); 725 struct ishtp_cl_device *cl_device = ishtp_dev_to_cl_device(device);
726 struct ishtp_cl *cros_ish_cl = ishtp_get_drvdata(cl_device); 726 struct ishtp_cl *cros_ish_cl = ishtp_get_drvdata(cl_device);
727 struct ishtp_cl_data *client_data = ishtp_get_client_data(cros_ish_cl); 727 struct ishtp_cl_data *client_data = ishtp_get_client_data(cros_ish_cl);
728 728
diff --git a/drivers/platform/olpc/olpc-xo175-ec.c b/drivers/platform/olpc/olpc-xo175-ec.c
index 48d6f0d87583..83ed1fbf73cf 100644
--- a/drivers/platform/olpc/olpc-xo175-ec.c
+++ b/drivers/platform/olpc/olpc-xo175-ec.c
@@ -736,6 +736,12 @@ static const struct of_device_id olpc_xo175_ec_of_match[] = {
736}; 736};
737MODULE_DEVICE_TABLE(of, olpc_xo175_ec_of_match); 737MODULE_DEVICE_TABLE(of, olpc_xo175_ec_of_match);
738 738
739static const struct spi_device_id olpc_xo175_ec_id_table[] = {
740 { "xo1.75-ec", 0 },
741 {}
742};
743MODULE_DEVICE_TABLE(spi, olpc_xo175_ec_id_table);
744
739static struct spi_driver olpc_xo175_ec_spi_driver = { 745static struct spi_driver olpc_xo175_ec_spi_driver = {
740 .driver = { 746 .driver = {
741 .name = "olpc-xo175-ec", 747 .name = "olpc-xo175-ec",
diff --git a/drivers/platform/x86/intel_pmc_core.c b/drivers/platform/x86/intel_pmc_core.c
index 235c0b89f824..c510d0d72475 100644
--- a/drivers/platform/x86/intel_pmc_core.c
+++ b/drivers/platform/x86/intel_pmc_core.c
@@ -812,6 +812,7 @@ static const struct x86_cpu_id intel_pmc_core_ids[] = {
812 INTEL_CPU_FAM6(KABYLAKE_DESKTOP, spt_reg_map), 812 INTEL_CPU_FAM6(KABYLAKE_DESKTOP, spt_reg_map),
813 INTEL_CPU_FAM6(CANNONLAKE_MOBILE, cnp_reg_map), 813 INTEL_CPU_FAM6(CANNONLAKE_MOBILE, cnp_reg_map),
814 INTEL_CPU_FAM6(ICELAKE_MOBILE, icl_reg_map), 814 INTEL_CPU_FAM6(ICELAKE_MOBILE, icl_reg_map),
815 INTEL_CPU_FAM6(ICELAKE_NNPI, icl_reg_map),
815 {} 816 {}
816}; 817};
817 818
diff --git a/drivers/platform/x86/pcengines-apuv2.c b/drivers/platform/x86/pcengines-apuv2.c
index b0d3110ae378..e4c68efac0c2 100644
--- a/drivers/platform/x86/pcengines-apuv2.c
+++ b/drivers/platform/x86/pcengines-apuv2.c
@@ -93,7 +93,7 @@ static struct gpiod_lookup_table gpios_led_table = {
93 93
94static struct gpio_keys_button apu2_keys_buttons[] = { 94static struct gpio_keys_button apu2_keys_buttons[] = {
95 { 95 {
96 .code = KEY_SETUP, 96 .code = KEY_RESTART,
97 .active_low = 1, 97 .active_low = 1,
98 .desc = "front button", 98 .desc = "front button",
99 .type = EV_KEY, 99 .type = EV_KEY,
@@ -255,6 +255,4 @@ MODULE_DESCRIPTION("PC Engines APUv2/APUv3 board GPIO/LED/keys driver");
255MODULE_LICENSE("GPL"); 255MODULE_LICENSE("GPL");
256MODULE_DEVICE_TABLE(dmi, apu_gpio_dmi_table); 256MODULE_DEVICE_TABLE(dmi, apu_gpio_dmi_table);
257MODULE_ALIAS("platform:pcengines-apuv2"); 257MODULE_ALIAS("platform:pcengines-apuv2");
258MODULE_SOFTDEP("pre: platform:" AMD_FCH_GPIO_DRIVER_NAME); 258MODULE_SOFTDEP("pre: platform:" AMD_FCH_GPIO_DRIVER_NAME " platform:leds-gpio platform:gpio_keys_polled");
259MODULE_SOFTDEP("pre: platform:leds-gpio");
260MODULE_SOFTDEP("pre: platform:gpio_keys_polled");
diff --git a/drivers/power/supply/ab8500_charger.c b/drivers/power/supply/ab8500_charger.c
index 30de448de802..86d88aec94a1 100644
--- a/drivers/power/supply/ab8500_charger.c
+++ b/drivers/power/supply/ab8500_charger.c
@@ -742,6 +742,7 @@ static int ab8500_charger_max_usb_curr(struct ab8500_charger *di,
742 USB_CH_IP_CUR_LVL_1P5; 742 USB_CH_IP_CUR_LVL_1P5;
743 break; 743 break;
744 } 744 }
745 /* Else, fall through */
745 case USB_STAT_HM_IDGND: 746 case USB_STAT_HM_IDGND:
746 dev_err(di->dev, "USB Type - Charging not allowed\n"); 747 dev_err(di->dev, "USB Type - Charging not allowed\n");
747 di->max_usb_in_curr.usb_type_max = USB_CH_IP_CUR_LVL_0P05; 748 di->max_usb_in_curr.usb_type_max = USB_CH_IP_CUR_LVL_0P05;
diff --git a/drivers/powercap/intel_rapl_common.c b/drivers/powercap/intel_rapl_common.c
index 9fd6dd342169..6df481896b5f 100644
--- a/drivers/powercap/intel_rapl_common.c
+++ b/drivers/powercap/intel_rapl_common.c
@@ -1454,7 +1454,7 @@ static void __exit rapl_exit(void)
1454 unregister_pm_notifier(&rapl_pm_notifier); 1454 unregister_pm_notifier(&rapl_pm_notifier);
1455} 1455}
1456 1456
1457module_init(rapl_init); 1457fs_initcall(rapl_init);
1458module_exit(rapl_exit); 1458module_exit(rapl_exit);
1459 1459
1460MODULE_DESCRIPTION("Intel Runtime Average Power Limit (RAPL) common code"); 1460MODULE_DESCRIPTION("Intel Runtime Average Power Limit (RAPL) common code");
diff --git a/drivers/powercap/powercap_sys.c b/drivers/powercap/powercap_sys.c
index 540e8aafc990..f808c5fa9838 100644
--- a/drivers/powercap/powercap_sys.c
+++ b/drivers/powercap/powercap_sys.c
@@ -671,7 +671,7 @@ static int __init powercap_init(void)
671 return class_register(&powercap_class); 671 return class_register(&powercap_class);
672} 672}
673 673
674device_initcall(powercap_init); 674fs_initcall(powercap_init);
675 675
676MODULE_DESCRIPTION("PowerCap sysfs Driver"); 676MODULE_DESCRIPTION("PowerCap sysfs Driver");
677MODULE_AUTHOR("Srinivas Pandruvada <srinivas.pandruvada@linux.intel.com>"); 677MODULE_AUTHOR("Srinivas Pandruvada <srinivas.pandruvada@linux.intel.com>");
diff --git a/drivers/pwm/core.c b/drivers/pwm/core.c
index c3ab07ab31a9..8edfac17364e 100644
--- a/drivers/pwm/core.c
+++ b/drivers/pwm/core.c
@@ -882,8 +882,11 @@ struct pwm_device *pwm_get(struct device *dev, const char *con_id)
882 return of_pwm_get(dev, dev->of_node, con_id); 882 return of_pwm_get(dev, dev->of_node, con_id);
883 883
884 /* then lookup via ACPI */ 884 /* then lookup via ACPI */
885 if (dev && is_acpi_node(dev->fwnode)) 885 if (dev && is_acpi_node(dev->fwnode)) {
886 return acpi_pwm_get(dev->fwnode); 886 pwm = acpi_pwm_get(dev->fwnode);
887 if (!IS_ERR(pwm) || PTR_ERR(pwm) != -ENOENT)
888 return pwm;
889 }
887 890
888 /* 891 /*
889 * We look up the provider in the static table typically provided by 892 * We look up the provider in the static table typically provided by
diff --git a/drivers/regulator/axp20x-regulator.c b/drivers/regulator/axp20x-regulator.c
index 152053361862..989506bd90b1 100644
--- a/drivers/regulator/axp20x-regulator.c
+++ b/drivers/regulator/axp20x-regulator.c
@@ -174,14 +174,14 @@
174#define AXP803_DCDC5_1140mV_STEPS 35 174#define AXP803_DCDC5_1140mV_STEPS 35
175#define AXP803_DCDC5_1140mV_END \ 175#define AXP803_DCDC5_1140mV_END \
176 (AXP803_DCDC5_1140mV_START + AXP803_DCDC5_1140mV_STEPS) 176 (AXP803_DCDC5_1140mV_START + AXP803_DCDC5_1140mV_STEPS)
177#define AXP803_DCDC5_NUM_VOLTAGES 68 177#define AXP803_DCDC5_NUM_VOLTAGES 69
178 178
179#define AXP803_DCDC6_600mV_START 0x00 179#define AXP803_DCDC6_600mV_START 0x00
180#define AXP803_DCDC6_600mV_STEPS 50 180#define AXP803_DCDC6_600mV_STEPS 50
181#define AXP803_DCDC6_600mV_END \ 181#define AXP803_DCDC6_600mV_END \
182 (AXP803_DCDC6_600mV_START + AXP803_DCDC6_600mV_STEPS) 182 (AXP803_DCDC6_600mV_START + AXP803_DCDC6_600mV_STEPS)
183#define AXP803_DCDC6_1120mV_START 0x33 183#define AXP803_DCDC6_1120mV_START 0x33
184#define AXP803_DCDC6_1120mV_STEPS 14 184#define AXP803_DCDC6_1120mV_STEPS 20
185#define AXP803_DCDC6_1120mV_END \ 185#define AXP803_DCDC6_1120mV_END \
186 (AXP803_DCDC6_1120mV_START + AXP803_DCDC6_1120mV_STEPS) 186 (AXP803_DCDC6_1120mV_START + AXP803_DCDC6_1120mV_STEPS)
187#define AXP803_DCDC6_NUM_VOLTAGES 72 187#define AXP803_DCDC6_NUM_VOLTAGES 72
@@ -240,7 +240,7 @@
240#define AXP806_DCDCA_600mV_END \ 240#define AXP806_DCDCA_600mV_END \
241 (AXP806_DCDCA_600mV_START + AXP806_DCDCA_600mV_STEPS) 241 (AXP806_DCDCA_600mV_START + AXP806_DCDCA_600mV_STEPS)
242#define AXP806_DCDCA_1120mV_START 0x33 242#define AXP806_DCDCA_1120mV_START 0x33
243#define AXP806_DCDCA_1120mV_STEPS 14 243#define AXP806_DCDCA_1120mV_STEPS 20
244#define AXP806_DCDCA_1120mV_END \ 244#define AXP806_DCDCA_1120mV_END \
245 (AXP806_DCDCA_1120mV_START + AXP806_DCDCA_1120mV_STEPS) 245 (AXP806_DCDCA_1120mV_START + AXP806_DCDCA_1120mV_STEPS)
246#define AXP806_DCDCA_NUM_VOLTAGES 72 246#define AXP806_DCDCA_NUM_VOLTAGES 72
@@ -774,8 +774,8 @@ static const struct regulator_linear_range axp806_dcdcd_ranges[] = {
774 AXP806_DCDCD_600mV_END, 774 AXP806_DCDCD_600mV_END,
775 20000), 775 20000),
776 REGULATOR_LINEAR_RANGE(1600000, 776 REGULATOR_LINEAR_RANGE(1600000,
777 AXP806_DCDCD_600mV_START, 777 AXP806_DCDCD_1600mV_START,
778 AXP806_DCDCD_600mV_END, 778 AXP806_DCDCD_1600mV_END,
779 100000), 779 100000),
780}; 780};
781 781
diff --git a/drivers/regulator/lp87565-regulator.c b/drivers/regulator/lp87565-regulator.c
index 5d067f7c2116..0c440c5e2832 100644
--- a/drivers/regulator/lp87565-regulator.c
+++ b/drivers/regulator/lp87565-regulator.c
@@ -163,7 +163,7 @@ static int lp87565_regulator_probe(struct platform_device *pdev)
163 struct lp87565 *lp87565 = dev_get_drvdata(pdev->dev.parent); 163 struct lp87565 *lp87565 = dev_get_drvdata(pdev->dev.parent);
164 struct regulator_config config = { }; 164 struct regulator_config config = { };
165 struct regulator_dev *rdev; 165 struct regulator_dev *rdev;
166 int i, min_idx = LP87565_BUCK_0, max_idx = LP87565_BUCK_3; 166 int i, min_idx, max_idx;
167 167
168 platform_set_drvdata(pdev, lp87565); 168 platform_set_drvdata(pdev, lp87565);
169 169
@@ -182,9 +182,9 @@ static int lp87565_regulator_probe(struct platform_device *pdev)
182 max_idx = LP87565_BUCK_3210; 182 max_idx = LP87565_BUCK_3210;
183 break; 183 break;
184 default: 184 default:
185 dev_err(lp87565->dev, "Invalid lp config %d\n", 185 min_idx = LP87565_BUCK_0;
186 lp87565->dev_type); 186 max_idx = LP87565_BUCK_3;
187 return -EINVAL; 187 break;
188 } 188 }
189 189
190 for (i = min_idx; i <= max_idx; i++) { 190 for (i = min_idx; i <= max_idx; i++) {
diff --git a/drivers/regulator/of_regulator.c b/drivers/regulator/of_regulator.c
index 397918ebba55..9112faa6a9a0 100644
--- a/drivers/regulator/of_regulator.c
+++ b/drivers/regulator/of_regulator.c
@@ -416,8 +416,10 @@ device_node *regulator_of_get_init_node(struct device *dev,
416 if (!name) 416 if (!name)
417 name = child->name; 417 name = child->name;
418 418
419 if (!strcmp(desc->of_match, name)) 419 if (!strcmp(desc->of_match, name)) {
420 of_node_put(search);
420 return of_node_get(child); 421 return of_node_get(child);
422 }
421 } 423 }
422 424
423 of_node_put(search); 425 of_node_put(search);
diff --git a/drivers/s390/block/dasd_alias.c b/drivers/s390/block/dasd_alias.c
index b9ce93e9df89..99f86612f775 100644
--- a/drivers/s390/block/dasd_alias.c
+++ b/drivers/s390/block/dasd_alias.c
@@ -383,6 +383,20 @@ suborder_not_supported(struct dasd_ccw_req *cqr)
383 char msg_format; 383 char msg_format;
384 char msg_no; 384 char msg_no;
385 385
386 /*
387 * intrc values ENODEV, ENOLINK and EPERM
388	 * will be obtained from sleep_on to indicate that no
389 * IO operation can be started
390 */
391 if (cqr->intrc == -ENODEV)
392 return 1;
393
394 if (cqr->intrc == -ENOLINK)
395 return 1;
396
397 if (cqr->intrc == -EPERM)
398 return 1;
399
386 sense = dasd_get_sense(&cqr->irb); 400 sense = dasd_get_sense(&cqr->irb);
387 if (!sense) 401 if (!sense)
388 return 0; 402 return 0;
@@ -447,12 +461,8 @@ static int read_unit_address_configuration(struct dasd_device *device,
447 lcu->flags &= ~NEED_UAC_UPDATE; 461 lcu->flags &= ~NEED_UAC_UPDATE;
448 spin_unlock_irqrestore(&lcu->lock, flags); 462 spin_unlock_irqrestore(&lcu->lock, flags);
449 463
450 do { 464 rc = dasd_sleep_on(cqr);
451 rc = dasd_sleep_on(cqr); 465 if (rc && !suborder_not_supported(cqr)) {
452 if (rc && suborder_not_supported(cqr))
453 return -EOPNOTSUPP;
454 } while (rc && (cqr->retries > 0));
455 if (rc) {
456 spin_lock_irqsave(&lcu->lock, flags); 466 spin_lock_irqsave(&lcu->lock, flags);
457 lcu->flags |= NEED_UAC_UPDATE; 467 lcu->flags |= NEED_UAC_UPDATE;
458 spin_unlock_irqrestore(&lcu->lock, flags); 468 spin_unlock_irqrestore(&lcu->lock, flags);
diff --git a/drivers/s390/char/con3215.c b/drivers/s390/char/con3215.c
index 8c9d412b6d33..e7cf0a1d4f71 100644
--- a/drivers/s390/char/con3215.c
+++ b/drivers/s390/char/con3215.c
@@ -398,6 +398,7 @@ static void raw3215_irq(struct ccw_device *cdev, unsigned long intparm,
398 } 398 }
399 if (dstat == 0x08) 399 if (dstat == 0x08)
400 break; 400 break;
401 /* else, fall through */
401 case 0x04: 402 case 0x04:
402 /* Device end interrupt. */ 403 /* Device end interrupt. */
403 if ((raw = req->info) == NULL) 404 if ((raw = req->info) == NULL)
diff --git a/drivers/s390/char/tape_core.c b/drivers/s390/char/tape_core.c
index 8d3370da2dfc..3e0b2f63a9d2 100644
--- a/drivers/s390/char/tape_core.c
+++ b/drivers/s390/char/tape_core.c
@@ -677,6 +677,7 @@ tape_generic_remove(struct ccw_device *cdev)
677 switch (device->tape_state) { 677 switch (device->tape_state) {
678 case TS_INIT: 678 case TS_INIT:
679 tape_state_set(device, TS_NOT_OPER); 679 tape_state_set(device, TS_NOT_OPER);
680 /* fallthrough */
680 case TS_NOT_OPER: 681 case TS_NOT_OPER:
681 /* 682 /*
682 * Nothing to do. 683 * Nothing to do.
@@ -949,6 +950,7 @@ __tape_start_request(struct tape_device *device, struct tape_request *request)
949 break; 950 break;
950 if (device->tape_state == TS_UNUSED) 951 if (device->tape_state == TS_UNUSED)
951 break; 952 break;
953 /* fallthrough */
952 default: 954 default:
953 if (device->tape_state == TS_BLKUSE) 955 if (device->tape_state == TS_BLKUSE)
954 break; 956 break;
@@ -1116,6 +1118,7 @@ __tape_do_irq (struct ccw_device *cdev, unsigned long intparm, struct irb *irb)
1116 case -ETIMEDOUT: 1118 case -ETIMEDOUT:
1117 DBF_LH(1, "(%08x): Request timed out\n", 1119 DBF_LH(1, "(%08x): Request timed out\n",
1118 device->cdev_id); 1120 device->cdev_id);
1121 /* fallthrough */
1119 case -EIO: 1122 case -EIO:
1120 __tape_end_request(device, request, -EIO); 1123 __tape_end_request(device, request, -EIO);
1121 break; 1124 break;
diff --git a/drivers/s390/cio/qdio_main.c b/drivers/s390/cio/qdio_main.c
index 730c4e68094b..4142c85e77d8 100644
--- a/drivers/s390/cio/qdio_main.c
+++ b/drivers/s390/cio/qdio_main.c
@@ -319,9 +319,7 @@ static int qdio_siga_output(struct qdio_q *q, unsigned int *busy_bit,
319 int retries = 0, cc; 319 int retries = 0, cc;
320 unsigned long laob = 0; 320 unsigned long laob = 0;
321 321
322 WARN_ON_ONCE(aob && ((queue_type(q) != QDIO_IQDIO_QFMT) || 322 if (aob) {
323 !q->u.out.use_cq));
324 if (q->u.out.use_cq && aob != 0) {
325 fc = QDIO_SIGA_WRITEQ; 323 fc = QDIO_SIGA_WRITEQ;
326 laob = aob; 324 laob = aob;
327 } 325 }
@@ -621,9 +619,6 @@ static inline unsigned long qdio_aob_for_buffer(struct qdio_output_q *q,
621{ 619{
622 unsigned long phys_aob = 0; 620 unsigned long phys_aob = 0;
623 621
624 if (!q->use_cq)
625 return 0;
626
627 if (!q->aobs[bufnr]) { 622 if (!q->aobs[bufnr]) {
628 struct qaob *aob = qdio_allocate_aob(); 623 struct qaob *aob = qdio_allocate_aob();
629 q->aobs[bufnr] = aob; 624 q->aobs[bufnr] = aob;
@@ -1308,6 +1303,8 @@ static void qdio_detect_hsicq(struct qdio_irq *irq_ptr)
1308 1303
1309 for_each_output_queue(irq_ptr, q, i) { 1304 for_each_output_queue(irq_ptr, q, i) {
1310 if (use_cq) { 1305 if (use_cq) {
1306 if (multicast_outbound(q))
1307 continue;
1311 if (qdio_enable_async_operation(&q->u.out) < 0) { 1308 if (qdio_enable_async_operation(&q->u.out) < 0) {
1312 use_cq = 0; 1309 use_cq = 0;
1313 continue; 1310 continue;
@@ -1553,18 +1550,19 @@ static int handle_outbound(struct qdio_q *q, unsigned int callflags,
1553 /* One SIGA-W per buffer required for unicast HSI */ 1550 /* One SIGA-W per buffer required for unicast HSI */
1554 WARN_ON_ONCE(count > 1 && !multicast_outbound(q)); 1551 WARN_ON_ONCE(count > 1 && !multicast_outbound(q));
1555 1552
1556 phys_aob = qdio_aob_for_buffer(&q->u.out, bufnr); 1553 if (q->u.out.use_cq)
1554 phys_aob = qdio_aob_for_buffer(&q->u.out, bufnr);
1557 1555
1558 rc = qdio_kick_outbound_q(q, phys_aob); 1556 rc = qdio_kick_outbound_q(q, phys_aob);
1559 } else if (need_siga_sync(q)) { 1557 } else if (need_siga_sync(q)) {
1560 rc = qdio_siga_sync_q(q); 1558 rc = qdio_siga_sync_q(q);
1559 } else if (count < QDIO_MAX_BUFFERS_PER_Q &&
1560 get_buf_state(q, prev_buf(bufnr), &state, 0) > 0 &&
1561 state == SLSB_CU_OUTPUT_PRIMED) {
1562 /* The previous buffer is not processed yet, tack on. */
1563 qperf_inc(q, fast_requeue);
1561 } else { 1564 } else {
1562 /* try to fast requeue buffers */ 1565 rc = qdio_kick_outbound_q(q, 0);
1563 get_buf_state(q, prev_buf(bufnr), &state, 0);
1564 if (state != SLSB_CU_OUTPUT_PRIMED)
1565 rc = qdio_kick_outbound_q(q, 0);
1566 else
1567 qperf_inc(q, fast_requeue);
1568 } 1566 }
1569 1567
1570 /* in case of SIGA errors we must process the error immediately */ 1568 /* in case of SIGA errors we must process the error immediately */
diff --git a/drivers/s390/cio/vfio_ccw_async.c b/drivers/s390/cio/vfio_ccw_async.c
index 8c1d2357ef5b..7a838e3d7c0f 100644
--- a/drivers/s390/cio/vfio_ccw_async.c
+++ b/drivers/s390/cio/vfio_ccw_async.c
@@ -70,7 +70,7 @@ static void vfio_ccw_async_region_release(struct vfio_ccw_private *private,
70 70
71} 71}
72 72
73const struct vfio_ccw_regops vfio_ccw_async_region_ops = { 73static const struct vfio_ccw_regops vfio_ccw_async_region_ops = {
74 .read = vfio_ccw_async_region_read, 74 .read = vfio_ccw_async_region_read,
75 .write = vfio_ccw_async_region_write, 75 .write = vfio_ccw_async_region_write,
76 .release = vfio_ccw_async_region_release, 76 .release = vfio_ccw_async_region_release,
diff --git a/drivers/s390/cio/vfio_ccw_cp.c b/drivers/s390/cio/vfio_ccw_cp.c
index 1d4c893ead23..3645d1720c4b 100644
--- a/drivers/s390/cio/vfio_ccw_cp.c
+++ b/drivers/s390/cio/vfio_ccw_cp.c
@@ -72,8 +72,10 @@ static int pfn_array_alloc(struct pfn_array *pa, u64 iova, unsigned int len)
72 sizeof(*pa->pa_iova_pfn) + 72 sizeof(*pa->pa_iova_pfn) +
73 sizeof(*pa->pa_pfn), 73 sizeof(*pa->pa_pfn),
74 GFP_KERNEL); 74 GFP_KERNEL);
75 if (unlikely(!pa->pa_iova_pfn)) 75 if (unlikely(!pa->pa_iova_pfn)) {
76 pa->pa_nr = 0;
76 return -ENOMEM; 77 return -ENOMEM;
78 }
77 pa->pa_pfn = pa->pa_iova_pfn + pa->pa_nr; 79 pa->pa_pfn = pa->pa_iova_pfn + pa->pa_nr;
78 80
79 pa->pa_iova_pfn[0] = pa->pa_iova >> PAGE_SHIFT; 81 pa->pa_iova_pfn[0] = pa->pa_iova >> PAGE_SHIFT;
@@ -421,7 +423,7 @@ static int ccwchain_loop_tic(struct ccwchain *chain,
421static int ccwchain_handle_ccw(u32 cda, struct channel_program *cp) 423static int ccwchain_handle_ccw(u32 cda, struct channel_program *cp)
422{ 424{
423 struct ccwchain *chain; 425 struct ccwchain *chain;
424 int len; 426 int len, ret;
425 427
426 /* Copy 2K (the most we support today) of possible CCWs */ 428 /* Copy 2K (the most we support today) of possible CCWs */
427 len = copy_from_iova(cp->mdev, cp->guest_cp, cda, 429 len = copy_from_iova(cp->mdev, cp->guest_cp, cda,
@@ -448,7 +450,12 @@ static int ccwchain_handle_ccw(u32 cda, struct channel_program *cp)
448 memcpy(chain->ch_ccw, cp->guest_cp, len * sizeof(struct ccw1)); 450 memcpy(chain->ch_ccw, cp->guest_cp, len * sizeof(struct ccw1));
449 451
450 /* Loop for tics on this new chain. */ 452 /* Loop for tics on this new chain. */
451 return ccwchain_loop_tic(chain, cp); 453 ret = ccwchain_loop_tic(chain, cp);
454
455 if (ret)
456 ccwchain_free(chain);
457
458 return ret;
452} 459}
453 460
454/* Loop for TICs. */ 461/* Loop for TICs. */
@@ -642,17 +649,16 @@ int cp_init(struct channel_program *cp, struct device *mdev, union orb *orb)
642 649
643 /* Build a ccwchain for the first CCW segment */ 650 /* Build a ccwchain for the first CCW segment */
644 ret = ccwchain_handle_ccw(orb->cmd.cpa, cp); 651 ret = ccwchain_handle_ccw(orb->cmd.cpa, cp);
645 if (ret)
646 cp_free(cp);
647
648 /* It is safe to force: if not set but idals used
649 * ccwchain_calc_length returns an error.
650 */
651 cp->orb.cmd.c64 = 1;
652 652
653 if (!ret) 653 if (!ret) {
654 cp->initialized = true; 654 cp->initialized = true;
655 655
656 /* It is safe to force: if it was not set but idals used
657 * ccwchain_calc_length would have returned an error.
658 */
659 cp->orb.cmd.c64 = 1;
660 }
661
656 return ret; 662 return ret;
657} 663}
658 664
diff --git a/drivers/s390/cio/vfio_ccw_drv.c b/drivers/s390/cio/vfio_ccw_drv.c
index 2b90a5ecaeb9..9208c0e56c33 100644
--- a/drivers/s390/cio/vfio_ccw_drv.c
+++ b/drivers/s390/cio/vfio_ccw_drv.c
@@ -88,7 +88,7 @@ static void vfio_ccw_sch_io_todo(struct work_struct *work)
88 (SCSW_ACTL_DEVACT | SCSW_ACTL_SCHACT)); 88 (SCSW_ACTL_DEVACT | SCSW_ACTL_SCHACT));
89 if (scsw_is_solicited(&irb->scsw)) { 89 if (scsw_is_solicited(&irb->scsw)) {
90 cp_update_scsw(&private->cp, &irb->scsw); 90 cp_update_scsw(&private->cp, &irb->scsw);
91 if (is_final) 91 if (is_final && private->state == VFIO_CCW_STATE_CP_PENDING)
92 cp_free(&private->cp); 92 cp_free(&private->cp);
93 } 93 }
94 mutex_lock(&private->io_mutex); 94 mutex_lock(&private->io_mutex);
diff --git a/drivers/s390/crypto/ap_queue.c b/drivers/s390/crypto/ap_queue.c
index 5ea83dc4f1d7..dad2be333d82 100644
--- a/drivers/s390/crypto/ap_queue.c
+++ b/drivers/s390/crypto/ap_queue.c
@@ -152,6 +152,7 @@ static struct ap_queue_status ap_sm_recv(struct ap_queue *aq)
152 ap_msg->receive(aq, ap_msg, aq->reply); 152 ap_msg->receive(aq, ap_msg, aq->reply);
153 break; 153 break;
154 } 154 }
155 /* fall through */
155 case AP_RESPONSE_NO_PENDING_REPLY: 156 case AP_RESPONSE_NO_PENDING_REPLY:
156 if (!status.queue_empty || aq->queue_count <= 0) 157 if (!status.queue_empty || aq->queue_count <= 0)
157 break; 158 break;
diff --git a/drivers/s390/crypto/zcrypt_msgtype6.c b/drivers/s390/crypto/zcrypt_msgtype6.c
index 12fe9deb265e..a36251d138fb 100644
--- a/drivers/s390/crypto/zcrypt_msgtype6.c
+++ b/drivers/s390/crypto/zcrypt_msgtype6.c
@@ -801,10 +801,7 @@ static int convert_response_ica(struct zcrypt_queue *zq,
801 if (msg->cprbx.cprb_ver_id == 0x02) 801 if (msg->cprbx.cprb_ver_id == 0x02)
802 return convert_type86_ica(zq, reply, 802 return convert_type86_ica(zq, reply,
803 outputdata, outputdatalength); 803 outputdata, outputdatalength);
804 /* 804 /* fall through - wrong cprb version is an unknown response */
805 * Fall through, no break, incorrect cprb version is an unknown
806 * response
807 */
808 default: /* Unknown response type, this should NEVER EVER happen */ 805 default: /* Unknown response type, this should NEVER EVER happen */
809 zq->online = 0; 806 zq->online = 0;
810 pr_err("Cryptographic device %02x.%04x failed and was set offline\n", 807 pr_err("Cryptographic device %02x.%04x failed and was set offline\n",
@@ -837,10 +834,7 @@ static int convert_response_xcrb(struct zcrypt_queue *zq,
837 } 834 }
838 if (msg->cprbx.cprb_ver_id == 0x02) 835 if (msg->cprbx.cprb_ver_id == 0x02)
839 return convert_type86_xcrb(zq, reply, xcRB); 836 return convert_type86_xcrb(zq, reply, xcRB);
840 /* 837 /* fall through - wrong cprb version is an unknown response */
841 * Fall through, no break, incorrect cprb version is an unknown
842 * response
843 */
844 default: /* Unknown response type, this should NEVER EVER happen */ 838 default: /* Unknown response type, this should NEVER EVER happen */
845 xcRB->status = 0x0008044DL; /* HDD_InvalidParm */ 839 xcRB->status = 0x0008044DL; /* HDD_InvalidParm */
846 zq->online = 0; 840 zq->online = 0;
@@ -870,7 +864,7 @@ static int convert_response_ep11_xcrb(struct zcrypt_queue *zq,
870 return convert_error(zq, reply); 864 return convert_error(zq, reply);
871 if (msg->cprbx.cprb_ver_id == 0x04) 865 if (msg->cprbx.cprb_ver_id == 0x04)
872 return convert_type86_ep11_xcrb(zq, reply, xcRB); 866 return convert_type86_ep11_xcrb(zq, reply, xcRB);
873 /* Fall through, no break, incorrect cprb version is an unknown resp.*/ 867 /* fall through - wrong cprb version is an unknown resp */
874 default: /* Unknown response type, this should NEVER EVER happen */ 868 default: /* Unknown response type, this should NEVER EVER happen */
875 zq->online = 0; 869 zq->online = 0;
876 pr_err("Cryptographic device %02x.%04x failed and was set offline\n", 870 pr_err("Cryptographic device %02x.%04x failed and was set offline\n",
@@ -900,10 +894,7 @@ static int convert_response_rng(struct zcrypt_queue *zq,
900 return -EINVAL; 894 return -EINVAL;
901 if (msg->cprbx.cprb_ver_id == 0x02) 895 if (msg->cprbx.cprb_ver_id == 0x02)
902 return convert_type86_rng(zq, reply, data); 896 return convert_type86_rng(zq, reply, data);
903 /* 897 /* fall through - wrong cprb version is an unknown response */
904 * Fall through, no break, incorrect cprb version is an unknown
905 * response
906 */
907 default: /* Unknown response type, this should NEVER EVER happen */ 898 default: /* Unknown response type, this should NEVER EVER happen */
908 zq->online = 0; 899 zq->online = 0;
909 pr_err("Cryptographic device %02x.%04x failed and was set offline\n", 900 pr_err("Cryptographic device %02x.%04x failed and was set offline\n",
diff --git a/drivers/s390/net/ctcm_fsms.c b/drivers/s390/net/ctcm_fsms.c
index 1b4ee570b712..4a8a5373cb35 100644
--- a/drivers/s390/net/ctcm_fsms.c
+++ b/drivers/s390/net/ctcm_fsms.c
@@ -1704,6 +1704,7 @@ static void ctcmpc_chx_attnbusy(fsm_instance *fsm, int event, void *arg)
1704 grp->changed_side = 2; 1704 grp->changed_side = 2;
1705 break; 1705 break;
1706 } 1706 }
1707 /* Else, fall through */
1707 case MPCG_STATE_XID0IOWAIX: 1708 case MPCG_STATE_XID0IOWAIX:
1708 case MPCG_STATE_XID7INITW: 1709 case MPCG_STATE_XID7INITW:
1709 case MPCG_STATE_XID7INITX: 1710 case MPCG_STATE_XID7INITX:
diff --git a/drivers/s390/net/ctcm_mpc.c b/drivers/s390/net/ctcm_mpc.c
index e02f295d38a9..1534420a0243 100644
--- a/drivers/s390/net/ctcm_mpc.c
+++ b/drivers/s390/net/ctcm_mpc.c
@@ -357,6 +357,7 @@ int ctc_mpc_alloc_channel(int port_num, void (*callback)(int, int))
357 /*fsm_newstate(grp->fsm, MPCG_STATE_XID2INITW);*/ 357 /*fsm_newstate(grp->fsm, MPCG_STATE_XID2INITW);*/
358 if (callback) 358 if (callback)
359 grp->send_qllc_disc = 1; 359 grp->send_qllc_disc = 1;
360 /* Else, fall through */
360 case MPCG_STATE_XID0IOWAIT: 361 case MPCG_STATE_XID0IOWAIT:
361 fsm_deltimer(&grp->timer); 362 fsm_deltimer(&grp->timer);
362 grp->outstanding_xid2 = 0; 363 grp->outstanding_xid2 = 0;
@@ -1469,6 +1470,7 @@ static void mpc_action_timeout(fsm_instance *fi, int event, void *arg)
1469 if ((fsm_getstate(rch->fsm) == CH_XID0_PENDING) && 1470 if ((fsm_getstate(rch->fsm) == CH_XID0_PENDING) &&
1470 (fsm_getstate(wch->fsm) == CH_XID0_PENDING)) 1471 (fsm_getstate(wch->fsm) == CH_XID0_PENDING))
1471 break; 1472 break;
1473 /* Else, fall through */
1472 default: 1474 default:
1473 fsm_event(grp->fsm, MPCG_EVENT_INOP, dev); 1475 fsm_event(grp->fsm, MPCG_EVENT_INOP, dev);
1474 } 1476 }
@@ -2089,6 +2091,7 @@ static int mpc_send_qllc_discontact(struct net_device *dev)
2089 grp->estconnfunc = NULL; 2091 grp->estconnfunc = NULL;
2090 break; 2092 break;
2091 } 2093 }
2094 /* Else, fall through */
2092 case MPCG_STATE_FLOWC: 2095 case MPCG_STATE_FLOWC:
2093 case MPCG_STATE_READY: 2096 case MPCG_STATE_READY:
2094 grp->send_qllc_disc = 2; 2097 grp->send_qllc_disc = 2;
diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h
index c7ee07ce3615..28db887d38ed 100644
--- a/drivers/s390/net/qeth_core.h
+++ b/drivers/s390/net/qeth_core.h
@@ -629,6 +629,7 @@ struct qeth_seqno {
629struct qeth_reply { 629struct qeth_reply {
630 struct list_head list; 630 struct list_head list;
631 struct completion received; 631 struct completion received;
632 spinlock_t lock;
632 int (*callback)(struct qeth_card *, struct qeth_reply *, 633 int (*callback)(struct qeth_card *, struct qeth_reply *,
633 unsigned long); 634 unsigned long);
634 u32 seqno; 635 u32 seqno;
diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
index 4d0caeebc802..6502b148541e 100644
--- a/drivers/s390/net/qeth_core_main.c
+++ b/drivers/s390/net/qeth_core_main.c
@@ -544,6 +544,7 @@ static struct qeth_reply *qeth_alloc_reply(struct qeth_card *card)
544 if (reply) { 544 if (reply) {
545 refcount_set(&reply->refcnt, 1); 545 refcount_set(&reply->refcnt, 1);
546 init_completion(&reply->received); 546 init_completion(&reply->received);
547 spin_lock_init(&reply->lock);
547 } 548 }
548 return reply; 549 return reply;
549} 550}
@@ -799,6 +800,13 @@ static void qeth_issue_next_read_cb(struct qeth_card *card,
799 800
800 if (!reply->callback) { 801 if (!reply->callback) {
801 rc = 0; 802 rc = 0;
803 goto no_callback;
804 }
805
806 spin_lock_irqsave(&reply->lock, flags);
807 if (reply->rc) {
808 /* Bail out when the requestor has already left: */
809 rc = reply->rc;
802 } else { 810 } else {
803 if (cmd) { 811 if (cmd) {
804 reply->offset = (u16)((char *)cmd - (char *)iob->data); 812 reply->offset = (u16)((char *)cmd - (char *)iob->data);
@@ -807,7 +815,9 @@ static void qeth_issue_next_read_cb(struct qeth_card *card,
807 rc = reply->callback(card, reply, (unsigned long)iob); 815 rc = reply->callback(card, reply, (unsigned long)iob);
808 } 816 }
809 } 817 }
818 spin_unlock_irqrestore(&reply->lock, flags);
810 819
820no_callback:
811 if (rc <= 0) 821 if (rc <= 0)
812 qeth_notify_reply(reply, rc); 822 qeth_notify_reply(reply, rc);
813 qeth_put_reply(reply); 823 qeth_put_reply(reply);
@@ -1749,6 +1759,16 @@ static int qeth_send_control_data(struct qeth_card *card,
1749 rc = (timeout == -ERESTARTSYS) ? -EINTR : -ETIME; 1759 rc = (timeout == -ERESTARTSYS) ? -EINTR : -ETIME;
1750 1760
1751 qeth_dequeue_reply(card, reply); 1761 qeth_dequeue_reply(card, reply);
1762
1763 if (reply_cb) {
1764 /* Wait until the callback for a late reply has completed: */
1765 spin_lock_irq(&reply->lock);
1766 if (rc)
1767 /* Zap any callback that's still pending: */
1768 reply->rc = rc;
1769 spin_unlock_irq(&reply->lock);
1770 }
1771
1752 if (!rc) 1772 if (!rc)
1753 rc = reply->rc; 1773 rc = reply->rc;
1754 qeth_put_reply(reply); 1774 qeth_put_reply(reply);
@@ -4354,6 +4374,10 @@ static int qeth_snmp_command(struct qeth_card *card, char __user *udata)
4354 get_user(req_len, &ureq->hdr.req_len)) 4374 get_user(req_len, &ureq->hdr.req_len))
4355 return -EFAULT; 4375 return -EFAULT;
4356 4376
4377 /* Sanitize user input, to avoid overflows in iob size calculation: */
4378 if (req_len > QETH_BUFSIZE)
4379 return -EINVAL;
4380
4357 iob = qeth_get_adapter_cmd(card, IPA_SETADP_SET_SNMP_CONTROL, req_len); 4381 iob = qeth_get_adapter_cmd(card, IPA_SETADP_SET_SNMP_CONTROL, req_len);
4358 if (!iob) 4382 if (!iob)
4359 return -ENOMEM; 4383 return -ENOMEM;
diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c
index fd64bc3f4062..cbead3d1b2fd 100644
--- a/drivers/s390/net/qeth_l2_main.c
+++ b/drivers/s390/net/qeth_l2_main.c
@@ -333,7 +333,7 @@ static int qeth_l2_process_inbound_buffer(struct qeth_card *card,
333 card->osn_info.data_cb(skb); 333 card->osn_info.data_cb(skb);
334 break; 334 break;
335 } 335 }
336 /* else unknown */ 336 /* Else, fall through */
337 default: 337 default:
338 dev_kfree_skb_any(skb); 338 dev_kfree_skb_any(skb);
339 QETH_CARD_TEXT(card, 3, "inbunkno"); 339 QETH_CARD_TEXT(card, 3, "inbunkno");
diff --git a/drivers/s390/virtio/virtio_ccw.c b/drivers/s390/virtio/virtio_ccw.c
index 1a55e5942d36..957889a42d2e 100644
--- a/drivers/s390/virtio/virtio_ccw.c
+++ b/drivers/s390/virtio/virtio_ccw.c
@@ -145,6 +145,8 @@ struct airq_info {
145 struct airq_iv *aiv; 145 struct airq_iv *aiv;
146}; 146};
147static struct airq_info *airq_areas[MAX_AIRQ_AREAS]; 147static struct airq_info *airq_areas[MAX_AIRQ_AREAS];
148static DEFINE_MUTEX(airq_areas_lock);
149
148static u8 *summary_indicators; 150static u8 *summary_indicators;
149 151
150static inline u8 *get_summary_indicator(struct airq_info *info) 152static inline u8 *get_summary_indicator(struct airq_info *info)
@@ -265,9 +267,11 @@ static unsigned long get_airq_indicator(struct virtqueue *vqs[], int nvqs,
265 unsigned long bit, flags; 267 unsigned long bit, flags;
266 268
267 for (i = 0; i < MAX_AIRQ_AREAS && !indicator_addr; i++) { 269 for (i = 0; i < MAX_AIRQ_AREAS && !indicator_addr; i++) {
270 mutex_lock(&airq_areas_lock);
268 if (!airq_areas[i]) 271 if (!airq_areas[i])
269 airq_areas[i] = new_airq_info(i); 272 airq_areas[i] = new_airq_info(i);
270 info = airq_areas[i]; 273 info = airq_areas[i];
274 mutex_unlock(&airq_areas_lock);
271 if (!info) 275 if (!info)
272 return 0; 276 return 0;
273 write_lock_irqsave(&info->lock, flags); 277 write_lock_irqsave(&info->lock, flags);
diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig
index 75f66f8ad3ea..1b92f3c19ff3 100644
--- a/drivers/scsi/Kconfig
+++ b/drivers/scsi/Kconfig
@@ -1523,10 +1523,10 @@ config SCSI_VIRTIO
1523 1523
1524source "drivers/scsi/csiostor/Kconfig" 1524source "drivers/scsi/csiostor/Kconfig"
1525 1525
1526endif # SCSI_LOWLEVEL
1527
1528source "drivers/scsi/pcmcia/Kconfig" 1526source "drivers/scsi/pcmcia/Kconfig"
1529 1527
1528endif # SCSI_LOWLEVEL
1529
1530source "drivers/scsi/device_handler/Kconfig" 1530source "drivers/scsi/device_handler/Kconfig"
1531 1531
1532endmenu 1532endmenu
diff --git a/drivers/scsi/arm/fas216.c b/drivers/scsi/arm/fas216.c
index aea4fd73c862..6c68c2303638 100644
--- a/drivers/scsi/arm/fas216.c
+++ b/drivers/scsi/arm/fas216.c
@@ -603,6 +603,7 @@ static void fas216_handlesync(FAS216_Info *info, char *msg)
603 msgqueue_flush(&info->scsi.msgs); 603 msgqueue_flush(&info->scsi.msgs);
604 msgqueue_addmsg(&info->scsi.msgs, 1, MESSAGE_REJECT); 604 msgqueue_addmsg(&info->scsi.msgs, 1, MESSAGE_REJECT);
605 info->scsi.phase = PHASE_MSGOUT_EXPECT; 605 info->scsi.phase = PHASE_MSGOUT_EXPECT;
606 /* fall through */
606 607
607 case async: 608 case async:
608 dev->period = info->ifcfg.asyncperiod / 4; 609 dev->period = info->ifcfg.asyncperiod / 4;
@@ -915,6 +916,7 @@ static void fas216_disconnect_intr(FAS216_Info *info)
915 fas216_done(info, DID_ABORT); 916 fas216_done(info, DID_ABORT);
916 break; 917 break;
917 } 918 }
919 /* else, fall through */
918 920
919 default: /* huh? */ 921 default: /* huh? */
920 printk(KERN_ERR "scsi%d.%c: unexpected disconnect in phase %s\n", 922 printk(KERN_ERR "scsi%d.%c: unexpected disconnect in phase %s\n",
@@ -1411,6 +1413,8 @@ static void fas216_busservice_intr(FAS216_Info *info, unsigned int stat, unsigne
1411 case STATE(STAT_STATUS, PHASE_DATAOUT): /* Data Out -> Status */ 1413 case STATE(STAT_STATUS, PHASE_DATAOUT): /* Data Out -> Status */
1412 case STATE(STAT_STATUS, PHASE_DATAIN): /* Data In -> Status */ 1414 case STATE(STAT_STATUS, PHASE_DATAIN): /* Data In -> Status */
1413 fas216_stoptransfer(info); 1415 fas216_stoptransfer(info);
1416 /* fall through */
1417
1414 case STATE(STAT_STATUS, PHASE_SELSTEPS):/* Sel w/ steps -> Status */ 1418 case STATE(STAT_STATUS, PHASE_SELSTEPS):/* Sel w/ steps -> Status */
1415 case STATE(STAT_STATUS, PHASE_MSGOUT): /* Message Out -> Status */ 1419 case STATE(STAT_STATUS, PHASE_MSGOUT): /* Message Out -> Status */
1416 case STATE(STAT_STATUS, PHASE_COMMAND): /* Command -> Status */ 1420 case STATE(STAT_STATUS, PHASE_COMMAND): /* Command -> Status */
@@ -1422,6 +1426,8 @@ static void fas216_busservice_intr(FAS216_Info *info, unsigned int stat, unsigne
1422 case STATE(STAT_MESGIN, PHASE_DATAOUT): /* Data Out -> Message In */ 1426 case STATE(STAT_MESGIN, PHASE_DATAOUT): /* Data Out -> Message In */
1423 case STATE(STAT_MESGIN, PHASE_DATAIN): /* Data In -> Message In */ 1427 case STATE(STAT_MESGIN, PHASE_DATAIN): /* Data In -> Message In */
1424 fas216_stoptransfer(info); 1428 fas216_stoptransfer(info);
1429 /* fall through */
1430
1425 case STATE(STAT_MESGIN, PHASE_COMMAND): /* Command -> Message In */ 1431 case STATE(STAT_MESGIN, PHASE_COMMAND): /* Command -> Message In */
1426 case STATE(STAT_MESGIN, PHASE_SELSTEPS):/* Sel w/ steps -> Message In */ 1432 case STATE(STAT_MESGIN, PHASE_SELSTEPS):/* Sel w/ steps -> Message In */
1427 case STATE(STAT_MESGIN, PHASE_MSGOUT): /* Message Out -> Message In */ 1433 case STATE(STAT_MESGIN, PHASE_MSGOUT): /* Message Out -> Message In */
@@ -1575,6 +1581,7 @@ static void fas216_funcdone_intr(FAS216_Info *info, unsigned int stat, unsigned
1575 fas216_message(info); 1581 fas216_message(info);
1576 break; 1582 break;
1577 } 1583 }
1584 /* else, fall through */
1578 1585
1579 default: 1586 default:
1580 fas216_log(info, 0, "internal phase %s for function done?" 1587 fas216_log(info, 0, "internal phase %s for function done?"
@@ -1957,6 +1964,7 @@ static void fas216_kick(FAS216_Info *info)
1957 switch (where_from) { 1964 switch (where_from) {
1958 case TYPE_QUEUE: 1965 case TYPE_QUEUE:
1959 fas216_allocate_tag(info, SCpnt); 1966 fas216_allocate_tag(info, SCpnt);
1967 /* fall through */
1960 case TYPE_OTHER: 1968 case TYPE_OTHER:
1961 fas216_start_command(info, SCpnt); 1969 fas216_start_command(info, SCpnt);
1962 break; 1970 break;
diff --git a/drivers/scsi/device_handler/scsi_dh_alua.c b/drivers/scsi/device_handler/scsi_dh_alua.c
index f0066f8a1786..4971104b1817 100644
--- a/drivers/scsi/device_handler/scsi_dh_alua.c
+++ b/drivers/scsi/device_handler/scsi_dh_alua.c
@@ -40,6 +40,7 @@
40#define ALUA_FAILOVER_TIMEOUT 60 40#define ALUA_FAILOVER_TIMEOUT 60
41#define ALUA_FAILOVER_RETRIES 5 41#define ALUA_FAILOVER_RETRIES 5
42#define ALUA_RTPG_DELAY_MSECS 5 42#define ALUA_RTPG_DELAY_MSECS 5
43#define ALUA_RTPG_RETRY_DELAY 2
43 44
44/* device handler flags */ 45/* device handler flags */
45#define ALUA_OPTIMIZE_STPG 0x01 46#define ALUA_OPTIMIZE_STPG 0x01
@@ -682,7 +683,7 @@ static int alua_rtpg(struct scsi_device *sdev, struct alua_port_group *pg)
682 case SCSI_ACCESS_STATE_TRANSITIONING: 683 case SCSI_ACCESS_STATE_TRANSITIONING:
683 if (time_before(jiffies, pg->expiry)) { 684 if (time_before(jiffies, pg->expiry)) {
684 /* State transition, retry */ 685 /* State transition, retry */
685 pg->interval = 2; 686 pg->interval = ALUA_RTPG_RETRY_DELAY;
686 err = SCSI_DH_RETRY; 687 err = SCSI_DH_RETRY;
687 } else { 688 } else {
688 struct alua_dh_data *h; 689 struct alua_dh_data *h;
@@ -807,6 +808,8 @@ static void alua_rtpg_work(struct work_struct *work)
807 spin_lock_irqsave(&pg->lock, flags); 808 spin_lock_irqsave(&pg->lock, flags);
808 pg->flags &= ~ALUA_PG_RUNNING; 809 pg->flags &= ~ALUA_PG_RUNNING;
809 pg->flags |= ALUA_PG_RUN_RTPG; 810 pg->flags |= ALUA_PG_RUN_RTPG;
811 if (!pg->interval)
812 pg->interval = ALUA_RTPG_RETRY_DELAY;
810 spin_unlock_irqrestore(&pg->lock, flags); 813 spin_unlock_irqrestore(&pg->lock, flags);
811 queue_delayed_work(kaluad_wq, &pg->rtpg_work, 814 queue_delayed_work(kaluad_wq, &pg->rtpg_work,
812 pg->interval * HZ); 815 pg->interval * HZ);
@@ -818,6 +821,8 @@ static void alua_rtpg_work(struct work_struct *work)
818 spin_lock_irqsave(&pg->lock, flags); 821 spin_lock_irqsave(&pg->lock, flags);
819 if (err == SCSI_DH_RETRY || pg->flags & ALUA_PG_RUN_RTPG) { 822 if (err == SCSI_DH_RETRY || pg->flags & ALUA_PG_RUN_RTPG) {
820 pg->flags &= ~ALUA_PG_RUNNING; 823 pg->flags &= ~ALUA_PG_RUNNING;
824 if (!pg->interval && !(pg->flags & ALUA_PG_RUN_RTPG))
825 pg->interval = ALUA_RTPG_RETRY_DELAY;
821 pg->flags |= ALUA_PG_RUN_RTPG; 826 pg->flags |= ALUA_PG_RUN_RTPG;
822 spin_unlock_irqrestore(&pg->lock, flags); 827 spin_unlock_irqrestore(&pg->lock, flags);
823 queue_delayed_work(kaluad_wq, &pg->rtpg_work, 828 queue_delayed_work(kaluad_wq, &pg->rtpg_work,
diff --git a/drivers/scsi/fcoe/fcoe_ctlr.c b/drivers/scsi/fcoe/fcoe_ctlr.c
index 590ec8009f52..1791a393795d 100644
--- a/drivers/scsi/fcoe/fcoe_ctlr.c
+++ b/drivers/scsi/fcoe/fcoe_ctlr.c
@@ -1019,7 +1019,7 @@ static void fcoe_ctlr_recv_adv(struct fcoe_ctlr *fip, struct sk_buff *skb)
1019{ 1019{
1020 struct fcoe_fcf *fcf; 1020 struct fcoe_fcf *fcf;
1021 struct fcoe_fcf new; 1021 struct fcoe_fcf new;
1022 unsigned long sol_tov = msecs_to_jiffies(FCOE_CTRL_SOL_TOV); 1022 unsigned long sol_tov = msecs_to_jiffies(FCOE_CTLR_SOL_TOV);
1023 int first = 0; 1023 int first = 0;
1024 int mtu_valid; 1024 int mtu_valid;
1025 int found = 0; 1025 int found = 0;
@@ -2005,7 +2005,7 @@ EXPORT_SYMBOL_GPL(fcoe_wwn_from_mac);
2005 */ 2005 */
2006static inline struct fcoe_rport *fcoe_ctlr_rport(struct fc_rport_priv *rdata) 2006static inline struct fcoe_rport *fcoe_ctlr_rport(struct fc_rport_priv *rdata)
2007{ 2007{
2008 return (struct fcoe_rport *)(rdata + 1); 2008 return container_of(rdata, struct fcoe_rport, rdata);
2009} 2009}
2010 2010
2011/** 2011/**
@@ -2269,7 +2269,7 @@ static void fcoe_ctlr_vn_start(struct fcoe_ctlr *fip)
2269 */ 2269 */
2270static int fcoe_ctlr_vn_parse(struct fcoe_ctlr *fip, 2270static int fcoe_ctlr_vn_parse(struct fcoe_ctlr *fip,
2271 struct sk_buff *skb, 2271 struct sk_buff *skb,
2272 struct fc_rport_priv *rdata) 2272 struct fcoe_rport *frport)
2273{ 2273{
2274 struct fip_header *fiph; 2274 struct fip_header *fiph;
2275 struct fip_desc *desc = NULL; 2275 struct fip_desc *desc = NULL;
@@ -2277,16 +2277,12 @@ static int fcoe_ctlr_vn_parse(struct fcoe_ctlr *fip,
2277 struct fip_wwn_desc *wwn = NULL; 2277 struct fip_wwn_desc *wwn = NULL;
2278 struct fip_vn_desc *vn = NULL; 2278 struct fip_vn_desc *vn = NULL;
2279 struct fip_size_desc *size = NULL; 2279 struct fip_size_desc *size = NULL;
2280 struct fcoe_rport *frport;
2281 size_t rlen; 2280 size_t rlen;
2282 size_t dlen; 2281 size_t dlen;
2283 u32 desc_mask = 0; 2282 u32 desc_mask = 0;
2284 u32 dtype; 2283 u32 dtype;
2285 u8 sub; 2284 u8 sub;
2286 2285
2287 memset(rdata, 0, sizeof(*rdata) + sizeof(*frport));
2288 frport = fcoe_ctlr_rport(rdata);
2289
2290 fiph = (struct fip_header *)skb->data; 2286 fiph = (struct fip_header *)skb->data;
2291 frport->flags = ntohs(fiph->fip_flags); 2287 frport->flags = ntohs(fiph->fip_flags);
2292 2288
@@ -2349,15 +2345,17 @@ static int fcoe_ctlr_vn_parse(struct fcoe_ctlr *fip,
2349 if (dlen != sizeof(struct fip_wwn_desc)) 2345 if (dlen != sizeof(struct fip_wwn_desc))
2350 goto len_err; 2346 goto len_err;
2351 wwn = (struct fip_wwn_desc *)desc; 2347 wwn = (struct fip_wwn_desc *)desc;
2352 rdata->ids.node_name = get_unaligned_be64(&wwn->fd_wwn); 2348 frport->rdata.ids.node_name =
2349 get_unaligned_be64(&wwn->fd_wwn);
2353 break; 2350 break;
2354 case FIP_DT_VN_ID: 2351 case FIP_DT_VN_ID:
2355 if (dlen != sizeof(struct fip_vn_desc)) 2352 if (dlen != sizeof(struct fip_vn_desc))
2356 goto len_err; 2353 goto len_err;
2357 vn = (struct fip_vn_desc *)desc; 2354 vn = (struct fip_vn_desc *)desc;
2358 memcpy(frport->vn_mac, vn->fd_mac, ETH_ALEN); 2355 memcpy(frport->vn_mac, vn->fd_mac, ETH_ALEN);
2359 rdata->ids.port_id = ntoh24(vn->fd_fc_id); 2356 frport->rdata.ids.port_id = ntoh24(vn->fd_fc_id);
2360 rdata->ids.port_name = get_unaligned_be64(&vn->fd_wwpn); 2357 frport->rdata.ids.port_name =
2358 get_unaligned_be64(&vn->fd_wwpn);
2361 break; 2359 break;
2362 case FIP_DT_FC4F: 2360 case FIP_DT_FC4F:
2363 if (dlen != sizeof(struct fip_fc4_feat)) 2361 if (dlen != sizeof(struct fip_fc4_feat))
@@ -2403,16 +2401,14 @@ static void fcoe_ctlr_vn_send_claim(struct fcoe_ctlr *fip)
2403/** 2401/**
2404 * fcoe_ctlr_vn_probe_req() - handle incoming VN2VN probe request. 2402 * fcoe_ctlr_vn_probe_req() - handle incoming VN2VN probe request.
2405 * @fip: The FCoE controller 2403 * @fip: The FCoE controller
2406 * @rdata: parsed remote port with frport from the probe request 2404 * @frport: parsed FCoE rport from the probe request
2407 * 2405 *
2408 * Called with ctlr_mutex held. 2406 * Called with ctlr_mutex held.
2409 */ 2407 */
2410static void fcoe_ctlr_vn_probe_req(struct fcoe_ctlr *fip, 2408static void fcoe_ctlr_vn_probe_req(struct fcoe_ctlr *fip,
2411 struct fc_rport_priv *rdata) 2409 struct fcoe_rport *frport)
2412{ 2410{
2413 struct fcoe_rport *frport = fcoe_ctlr_rport(rdata); 2411 if (frport->rdata.ids.port_id != fip->port_id)
2414
2415 if (rdata->ids.port_id != fip->port_id)
2416 return; 2412 return;
2417 2413
2418 switch (fip->state) { 2414 switch (fip->state) {
@@ -2432,7 +2428,7 @@ static void fcoe_ctlr_vn_probe_req(struct fcoe_ctlr *fip,
2432 * Probe's REC bit is not set. 2428 * Probe's REC bit is not set.
2433 * If we don't reply, we will change our address. 2429 * If we don't reply, we will change our address.
2434 */ 2430 */
2435 if (fip->lp->wwpn > rdata->ids.port_name && 2431 if (fip->lp->wwpn > frport->rdata.ids.port_name &&
2436 !(frport->flags & FIP_FL_REC_OR_P2P)) { 2432 !(frport->flags & FIP_FL_REC_OR_P2P)) {
2437 LIBFCOE_FIP_DBG(fip, "vn_probe_req: " 2433 LIBFCOE_FIP_DBG(fip, "vn_probe_req: "
2438 "port_id collision\n"); 2434 "port_id collision\n");
@@ -2456,14 +2452,14 @@ static void fcoe_ctlr_vn_probe_req(struct fcoe_ctlr *fip,
2456/** 2452/**
2457 * fcoe_ctlr_vn_probe_reply() - handle incoming VN2VN probe reply. 2453 * fcoe_ctlr_vn_probe_reply() - handle incoming VN2VN probe reply.
2458 * @fip: The FCoE controller 2454 * @fip: The FCoE controller
2459 * @rdata: parsed remote port with frport from the probe request 2455 * @frport: parsed FCoE rport from the probe request
2460 * 2456 *
2461 * Called with ctlr_mutex held. 2457 * Called with ctlr_mutex held.
2462 */ 2458 */
2463static void fcoe_ctlr_vn_probe_reply(struct fcoe_ctlr *fip, 2459static void fcoe_ctlr_vn_probe_reply(struct fcoe_ctlr *fip,
2464 struct fc_rport_priv *rdata) 2460 struct fcoe_rport *frport)
2465{ 2461{
2466 if (rdata->ids.port_id != fip->port_id) 2462 if (frport->rdata.ids.port_id != fip->port_id)
2467 return; 2463 return;
2468 switch (fip->state) { 2464 switch (fip->state) {
2469 case FIP_ST_VNMP_START: 2465 case FIP_ST_VNMP_START:
@@ -2486,11 +2482,11 @@ static void fcoe_ctlr_vn_probe_reply(struct fcoe_ctlr *fip,
2486/** 2482/**
2487 * fcoe_ctlr_vn_add() - Add a VN2VN entry to the list, based on a claim reply. 2483 * fcoe_ctlr_vn_add() - Add a VN2VN entry to the list, based on a claim reply.
2488 * @fip: The FCoE controller 2484 * @fip: The FCoE controller
2489 * @new: newly-parsed remote port with frport as a template for new rdata 2485 * @new: newly-parsed FCoE rport as a template for new rdata
2490 * 2486 *
2491 * Called with ctlr_mutex held. 2487 * Called with ctlr_mutex held.
2492 */ 2488 */
2493static void fcoe_ctlr_vn_add(struct fcoe_ctlr *fip, struct fc_rport_priv *new) 2489static void fcoe_ctlr_vn_add(struct fcoe_ctlr *fip, struct fcoe_rport *new)
2494{ 2490{
2495 struct fc_lport *lport = fip->lp; 2491 struct fc_lport *lport = fip->lp;
2496 struct fc_rport_priv *rdata; 2492 struct fc_rport_priv *rdata;
@@ -2498,7 +2494,7 @@ static void fcoe_ctlr_vn_add(struct fcoe_ctlr *fip, struct fc_rport_priv *new)
2498 struct fcoe_rport *frport; 2494 struct fcoe_rport *frport;
2499 u32 port_id; 2495 u32 port_id;
2500 2496
2501 port_id = new->ids.port_id; 2497 port_id = new->rdata.ids.port_id;
2502 if (port_id == fip->port_id) 2498 if (port_id == fip->port_id)
2503 return; 2499 return;
2504 2500
@@ -2515,22 +2511,28 @@ static void fcoe_ctlr_vn_add(struct fcoe_ctlr *fip, struct fc_rport_priv *new)
2515 rdata->disc_id = lport->disc.disc_id; 2511 rdata->disc_id = lport->disc.disc_id;
2516 2512
2517 ids = &rdata->ids; 2513 ids = &rdata->ids;
2518 if ((ids->port_name != -1 && ids->port_name != new->ids.port_name) || 2514 if ((ids->port_name != -1 &&
2519 (ids->node_name != -1 && ids->node_name != new->ids.node_name)) { 2515 ids->port_name != new->rdata.ids.port_name) ||
2516 (ids->node_name != -1 &&
2517 ids->node_name != new->rdata.ids.node_name)) {
2520 mutex_unlock(&rdata->rp_mutex); 2518 mutex_unlock(&rdata->rp_mutex);
2521 LIBFCOE_FIP_DBG(fip, "vn_add rport logoff %6.6x\n", port_id); 2519 LIBFCOE_FIP_DBG(fip, "vn_add rport logoff %6.6x\n", port_id);
2522 fc_rport_logoff(rdata); 2520 fc_rport_logoff(rdata);
2523 mutex_lock(&rdata->rp_mutex); 2521 mutex_lock(&rdata->rp_mutex);
2524 } 2522 }
2525 ids->port_name = new->ids.port_name; 2523 ids->port_name = new->rdata.ids.port_name;
2526 ids->node_name = new->ids.node_name; 2524 ids->node_name = new->rdata.ids.node_name;
2527 mutex_unlock(&rdata->rp_mutex); 2525 mutex_unlock(&rdata->rp_mutex);
2528 2526
2529 frport = fcoe_ctlr_rport(rdata); 2527 frport = fcoe_ctlr_rport(rdata);
2530 LIBFCOE_FIP_DBG(fip, "vn_add rport %6.6x %s state %d\n", 2528 LIBFCOE_FIP_DBG(fip, "vn_add rport %6.6x %s state %d\n",
2531 port_id, frport->fcoe_len ? "old" : "new", 2529 port_id, frport->fcoe_len ? "old" : "new",
2532 rdata->rp_state); 2530 rdata->rp_state);
2533 *frport = *fcoe_ctlr_rport(new); 2531 frport->fcoe_len = new->fcoe_len;
2532 frport->flags = new->flags;
2533 frport->login_count = new->login_count;
2534 memcpy(frport->enode_mac, new->enode_mac, ETH_ALEN);
2535 memcpy(frport->vn_mac, new->vn_mac, ETH_ALEN);
2534 frport->time = 0; 2536 frport->time = 0;
2535} 2537}
2536 2538
@@ -2562,16 +2564,14 @@ static int fcoe_ctlr_vn_lookup(struct fcoe_ctlr *fip, u32 port_id, u8 *mac)
2562/** 2564/**
2563 * fcoe_ctlr_vn_claim_notify() - handle received FIP VN2VN Claim Notification 2565 * fcoe_ctlr_vn_claim_notify() - handle received FIP VN2VN Claim Notification
2564 * @fip: The FCoE controller 2566 * @fip: The FCoE controller
2565 * @new: newly-parsed remote port with frport as a template for new rdata 2567 * @new: newly-parsed FCoE rport as a template for new rdata
2566 * 2568 *
2567 * Called with ctlr_mutex held. 2569 * Called with ctlr_mutex held.
2568 */ 2570 */
2569static void fcoe_ctlr_vn_claim_notify(struct fcoe_ctlr *fip, 2571static void fcoe_ctlr_vn_claim_notify(struct fcoe_ctlr *fip,
2570 struct fc_rport_priv *new) 2572 struct fcoe_rport *new)
2571{ 2573{
2572 struct fcoe_rport *frport = fcoe_ctlr_rport(new); 2574 if (new->flags & FIP_FL_REC_OR_P2P) {
2573
2574 if (frport->flags & FIP_FL_REC_OR_P2P) {
2575 LIBFCOE_FIP_DBG(fip, "send probe req for P2P/REC\n"); 2575 LIBFCOE_FIP_DBG(fip, "send probe req for P2P/REC\n");
2576 fcoe_ctlr_vn_send(fip, FIP_SC_VN_PROBE_REQ, fcoe_all_vn2vn, 0); 2576 fcoe_ctlr_vn_send(fip, FIP_SC_VN_PROBE_REQ, fcoe_all_vn2vn, 0);
2577 return; 2577 return;
@@ -2580,7 +2580,7 @@ static void fcoe_ctlr_vn_claim_notify(struct fcoe_ctlr *fip,
2580 case FIP_ST_VNMP_START: 2580 case FIP_ST_VNMP_START:
2581 case FIP_ST_VNMP_PROBE1: 2581 case FIP_ST_VNMP_PROBE1:
2582 case FIP_ST_VNMP_PROBE2: 2582 case FIP_ST_VNMP_PROBE2:
2583 if (new->ids.port_id == fip->port_id) { 2583 if (new->rdata.ids.port_id == fip->port_id) {
2584 LIBFCOE_FIP_DBG(fip, "vn_claim_notify: " 2584 LIBFCOE_FIP_DBG(fip, "vn_claim_notify: "
2585 "restart, state %d\n", 2585 "restart, state %d\n",
2586 fip->state); 2586 fip->state);
@@ -2589,8 +2589,8 @@ static void fcoe_ctlr_vn_claim_notify(struct fcoe_ctlr *fip,
2589 break; 2589 break;
2590 case FIP_ST_VNMP_CLAIM: 2590 case FIP_ST_VNMP_CLAIM:
2591 case FIP_ST_VNMP_UP: 2591 case FIP_ST_VNMP_UP:
2592 if (new->ids.port_id == fip->port_id) { 2592 if (new->rdata.ids.port_id == fip->port_id) {
2593 if (new->ids.port_name > fip->lp->wwpn) { 2593 if (new->rdata.ids.port_name > fip->lp->wwpn) {
2594 LIBFCOE_FIP_DBG(fip, "vn_claim_notify: " 2594 LIBFCOE_FIP_DBG(fip, "vn_claim_notify: "
2595 "restart, port_id collision\n"); 2595 "restart, port_id collision\n");
2596 fcoe_ctlr_vn_restart(fip); 2596 fcoe_ctlr_vn_restart(fip);
@@ -2602,15 +2602,16 @@ static void fcoe_ctlr_vn_claim_notify(struct fcoe_ctlr *fip,
2602 break; 2602 break;
2603 } 2603 }
2604 LIBFCOE_FIP_DBG(fip, "vn_claim_notify: send reply to %x\n", 2604 LIBFCOE_FIP_DBG(fip, "vn_claim_notify: send reply to %x\n",
2605 new->ids.port_id); 2605 new->rdata.ids.port_id);
2606 fcoe_ctlr_vn_send(fip, FIP_SC_VN_CLAIM_REP, frport->enode_mac, 2606 fcoe_ctlr_vn_send(fip, FIP_SC_VN_CLAIM_REP, new->enode_mac,
2607 min((u32)frport->fcoe_len, 2607 min((u32)new->fcoe_len,
2608 fcoe_ctlr_fcoe_size(fip))); 2608 fcoe_ctlr_fcoe_size(fip)));
2609 fcoe_ctlr_vn_add(fip, new); 2609 fcoe_ctlr_vn_add(fip, new);
2610 break; 2610 break;
2611 default: 2611 default:
2612 LIBFCOE_FIP_DBG(fip, "vn_claim_notify: " 2612 LIBFCOE_FIP_DBG(fip, "vn_claim_notify: "
2613 "ignoring claim from %x\n", new->ids.port_id); 2613 "ignoring claim from %x\n",
2614 new->rdata.ids.port_id);
2614 break; 2615 break;
2615 } 2616 }
2616} 2617}
@@ -2618,15 +2619,15 @@ static void fcoe_ctlr_vn_claim_notify(struct fcoe_ctlr *fip,
2618/** 2619/**
2619 * fcoe_ctlr_vn_claim_resp() - handle received Claim Response 2620 * fcoe_ctlr_vn_claim_resp() - handle received Claim Response
2620 * @fip: The FCoE controller that received the frame 2621 * @fip: The FCoE controller that received the frame
2621 * @new: newly-parsed remote port with frport from the Claim Response 2622 * @new: newly-parsed FCoE rport from the Claim Response
2622 * 2623 *
2623 * Called with ctlr_mutex held. 2624 * Called with ctlr_mutex held.
2624 */ 2625 */
2625static void fcoe_ctlr_vn_claim_resp(struct fcoe_ctlr *fip, 2626static void fcoe_ctlr_vn_claim_resp(struct fcoe_ctlr *fip,
2626 struct fc_rport_priv *new) 2627 struct fcoe_rport *new)
2627{ 2628{
2628 LIBFCOE_FIP_DBG(fip, "claim resp from from rport %x - state %s\n", 2629 LIBFCOE_FIP_DBG(fip, "claim resp from from rport %x - state %s\n",
2629 new->ids.port_id, fcoe_ctlr_state(fip->state)); 2630 new->rdata.ids.port_id, fcoe_ctlr_state(fip->state));
2630 if (fip->state == FIP_ST_VNMP_UP || fip->state == FIP_ST_VNMP_CLAIM) 2631 if (fip->state == FIP_ST_VNMP_UP || fip->state == FIP_ST_VNMP_CLAIM)
2631 fcoe_ctlr_vn_add(fip, new); 2632 fcoe_ctlr_vn_add(fip, new);
2632} 2633}
@@ -2634,28 +2635,28 @@ static void fcoe_ctlr_vn_claim_resp(struct fcoe_ctlr *fip,
2634/** 2635/**
2635 * fcoe_ctlr_vn_beacon() - handle received beacon. 2636 * fcoe_ctlr_vn_beacon() - handle received beacon.
2636 * @fip: The FCoE controller that received the frame 2637 * @fip: The FCoE controller that received the frame
2637 * @new: newly-parsed remote port with frport from the Beacon 2638 * @new: newly-parsed FCoE rport from the Beacon
2638 * 2639 *
2639 * Called with ctlr_mutex held. 2640 * Called with ctlr_mutex held.
2640 */ 2641 */
2641static void fcoe_ctlr_vn_beacon(struct fcoe_ctlr *fip, 2642static void fcoe_ctlr_vn_beacon(struct fcoe_ctlr *fip,
2642 struct fc_rport_priv *new) 2643 struct fcoe_rport *new)
2643{ 2644{
2644 struct fc_lport *lport = fip->lp; 2645 struct fc_lport *lport = fip->lp;
2645 struct fc_rport_priv *rdata; 2646 struct fc_rport_priv *rdata;
2646 struct fcoe_rport *frport; 2647 struct fcoe_rport *frport;
2647 2648
2648 frport = fcoe_ctlr_rport(new); 2649 if (new->flags & FIP_FL_REC_OR_P2P) {
2649 if (frport->flags & FIP_FL_REC_OR_P2P) {
2650 LIBFCOE_FIP_DBG(fip, "p2p beacon while in vn2vn mode\n"); 2650 LIBFCOE_FIP_DBG(fip, "p2p beacon while in vn2vn mode\n");
2651 fcoe_ctlr_vn_send(fip, FIP_SC_VN_PROBE_REQ, fcoe_all_vn2vn, 0); 2651 fcoe_ctlr_vn_send(fip, FIP_SC_VN_PROBE_REQ, fcoe_all_vn2vn, 0);
2652 return; 2652 return;
2653 } 2653 }
2654 rdata = fc_rport_lookup(lport, new->ids.port_id); 2654 rdata = fc_rport_lookup(lport, new->rdata.ids.port_id);
2655 if (rdata) { 2655 if (rdata) {
2656 if (rdata->ids.node_name == new->ids.node_name && 2656 if (rdata->ids.node_name == new->rdata.ids.node_name &&
2657 rdata->ids.port_name == new->ids.port_name) { 2657 rdata->ids.port_name == new->rdata.ids.port_name) {
2658 frport = fcoe_ctlr_rport(rdata); 2658 frport = fcoe_ctlr_rport(rdata);
2659
2659 LIBFCOE_FIP_DBG(fip, "beacon from rport %x\n", 2660 LIBFCOE_FIP_DBG(fip, "beacon from rport %x\n",
2660 rdata->ids.port_id); 2661 rdata->ids.port_id);
2661 if (!frport->time && fip->state == FIP_ST_VNMP_UP) { 2662 if (!frport->time && fip->state == FIP_ST_VNMP_UP) {
@@ -2678,7 +2679,7 @@ static void fcoe_ctlr_vn_beacon(struct fcoe_ctlr *fip,
2678 * Don't add the neighbor yet. 2679 * Don't add the neighbor yet.
2679 */ 2680 */
2680 LIBFCOE_FIP_DBG(fip, "beacon from new rport %x. sending claim notify\n", 2681 LIBFCOE_FIP_DBG(fip, "beacon from new rport %x. sending claim notify\n",
2681 new->ids.port_id); 2682 new->rdata.ids.port_id);
2682 if (time_after(jiffies, 2683 if (time_after(jiffies,
2683 fip->sol_time + msecs_to_jiffies(FIP_VN_ANN_WAIT))) 2684 fip->sol_time + msecs_to_jiffies(FIP_VN_ANN_WAIT)))
2684 fcoe_ctlr_vn_send_claim(fip); 2685 fcoe_ctlr_vn_send_claim(fip);
@@ -2738,10 +2739,7 @@ static int fcoe_ctlr_vn_recv(struct fcoe_ctlr *fip, struct sk_buff *skb)
2738{ 2739{
2739 struct fip_header *fiph; 2740 struct fip_header *fiph;
2740 enum fip_vn2vn_subcode sub; 2741 enum fip_vn2vn_subcode sub;
2741 struct { 2742 struct fcoe_rport frport = { };
2742 struct fc_rport_priv rdata;
2743 struct fcoe_rport frport;
2744 } buf;
2745 int rc, vlan_id = 0; 2743 int rc, vlan_id = 0;
2746 2744
2747 fiph = (struct fip_header *)skb->data; 2745 fiph = (struct fip_header *)skb->data;
@@ -2757,7 +2755,7 @@ static int fcoe_ctlr_vn_recv(struct fcoe_ctlr *fip, struct sk_buff *skb)
2757 goto drop; 2755 goto drop;
2758 } 2756 }
2759 2757
2760 rc = fcoe_ctlr_vn_parse(fip, skb, &buf.rdata); 2758 rc = fcoe_ctlr_vn_parse(fip, skb, &frport);
2761 if (rc) { 2759 if (rc) {
2762 LIBFCOE_FIP_DBG(fip, "vn_recv vn_parse error %d\n", rc); 2760 LIBFCOE_FIP_DBG(fip, "vn_recv vn_parse error %d\n", rc);
2763 goto drop; 2761 goto drop;
@@ -2766,19 +2764,19 @@ static int fcoe_ctlr_vn_recv(struct fcoe_ctlr *fip, struct sk_buff *skb)
2766 mutex_lock(&fip->ctlr_mutex); 2764 mutex_lock(&fip->ctlr_mutex);
2767 switch (sub) { 2765 switch (sub) {
2768 case FIP_SC_VN_PROBE_REQ: 2766 case FIP_SC_VN_PROBE_REQ:
2769 fcoe_ctlr_vn_probe_req(fip, &buf.rdata); 2767 fcoe_ctlr_vn_probe_req(fip, &frport);
2770 break; 2768 break;
2771 case FIP_SC_VN_PROBE_REP: 2769 case FIP_SC_VN_PROBE_REP:
2772 fcoe_ctlr_vn_probe_reply(fip, &buf.rdata); 2770 fcoe_ctlr_vn_probe_reply(fip, &frport);
2773 break; 2771 break;
2774 case FIP_SC_VN_CLAIM_NOTIFY: 2772 case FIP_SC_VN_CLAIM_NOTIFY:
2775 fcoe_ctlr_vn_claim_notify(fip, &buf.rdata); 2773 fcoe_ctlr_vn_claim_notify(fip, &frport);
2776 break; 2774 break;
2777 case FIP_SC_VN_CLAIM_REP: 2775 case FIP_SC_VN_CLAIM_REP:
2778 fcoe_ctlr_vn_claim_resp(fip, &buf.rdata); 2776 fcoe_ctlr_vn_claim_resp(fip, &frport);
2779 break; 2777 break;
2780 case FIP_SC_VN_BEACON: 2778 case FIP_SC_VN_BEACON:
2781 fcoe_ctlr_vn_beacon(fip, &buf.rdata); 2779 fcoe_ctlr_vn_beacon(fip, &frport);
2782 break; 2780 break;
2783 default: 2781 default:
2784 LIBFCOE_FIP_DBG(fip, "vn_recv unknown subcode %d\n", sub); 2782 LIBFCOE_FIP_DBG(fip, "vn_recv unknown subcode %d\n", sub);
@@ -2802,22 +2800,18 @@ drop:
2802 */ 2800 */
2803static int fcoe_ctlr_vlan_parse(struct fcoe_ctlr *fip, 2801static int fcoe_ctlr_vlan_parse(struct fcoe_ctlr *fip,
2804 struct sk_buff *skb, 2802 struct sk_buff *skb,
2805 struct fc_rport_priv *rdata) 2803 struct fcoe_rport *frport)
2806{ 2804{
2807 struct fip_header *fiph; 2805 struct fip_header *fiph;
2808 struct fip_desc *desc = NULL; 2806 struct fip_desc *desc = NULL;
2809 struct fip_mac_desc *macd = NULL; 2807 struct fip_mac_desc *macd = NULL;
2810 struct fip_wwn_desc *wwn = NULL; 2808 struct fip_wwn_desc *wwn = NULL;
2811 struct fcoe_rport *frport;
2812 size_t rlen; 2809 size_t rlen;
2813 size_t dlen; 2810 size_t dlen;
2814 u32 desc_mask = 0; 2811 u32 desc_mask = 0;
2815 u32 dtype; 2812 u32 dtype;
2816 u8 sub; 2813 u8 sub;
2817 2814
2818 memset(rdata, 0, sizeof(*rdata) + sizeof(*frport));
2819 frport = fcoe_ctlr_rport(rdata);
2820
2821 fiph = (struct fip_header *)skb->data; 2815 fiph = (struct fip_header *)skb->data;
2822 frport->flags = ntohs(fiph->fip_flags); 2816 frport->flags = ntohs(fiph->fip_flags);
2823 2817
@@ -2871,7 +2865,8 @@ static int fcoe_ctlr_vlan_parse(struct fcoe_ctlr *fip,
2871 if (dlen != sizeof(struct fip_wwn_desc)) 2865 if (dlen != sizeof(struct fip_wwn_desc))
2872 goto len_err; 2866 goto len_err;
2873 wwn = (struct fip_wwn_desc *)desc; 2867 wwn = (struct fip_wwn_desc *)desc;
2874 rdata->ids.node_name = get_unaligned_be64(&wwn->fd_wwn); 2868 frport->rdata.ids.node_name =
2869 get_unaligned_be64(&wwn->fd_wwn);
2875 break; 2870 break;
2876 default: 2871 default:
2877 LIBFCOE_FIP_DBG(fip, "unexpected descriptor type %x " 2872 LIBFCOE_FIP_DBG(fip, "unexpected descriptor type %x "
@@ -2957,13 +2952,13 @@ static void fcoe_ctlr_vlan_send(struct fcoe_ctlr *fip,
2957/** 2952/**
2958 * fcoe_ctlr_vlan_disk_reply() - send FIP VLAN Discovery Notification. 2953 * fcoe_ctlr_vlan_disk_reply() - send FIP VLAN Discovery Notification.
2959 * @fip: The FCoE controller 2954 * @fip: The FCoE controller
2955 * @frport: The newly-parsed FCoE rport from the Discovery Request
2960 * 2956 *
2961 * Called with ctlr_mutex held. 2957 * Called with ctlr_mutex held.
2962 */ 2958 */
2963static void fcoe_ctlr_vlan_disc_reply(struct fcoe_ctlr *fip, 2959static void fcoe_ctlr_vlan_disc_reply(struct fcoe_ctlr *fip,
2964 struct fc_rport_priv *rdata) 2960 struct fcoe_rport *frport)
2965{ 2961{
2966 struct fcoe_rport *frport = fcoe_ctlr_rport(rdata);
2967 enum fip_vlan_subcode sub = FIP_SC_VL_NOTE; 2962 enum fip_vlan_subcode sub = FIP_SC_VL_NOTE;
2968 2963
2969 if (fip->mode == FIP_MODE_VN2VN) 2964 if (fip->mode == FIP_MODE_VN2VN)
@@ -2982,22 +2977,19 @@ static int fcoe_ctlr_vlan_recv(struct fcoe_ctlr *fip, struct sk_buff *skb)
2982{ 2977{
2983 struct fip_header *fiph; 2978 struct fip_header *fiph;
2984 enum fip_vlan_subcode sub; 2979 enum fip_vlan_subcode sub;
2985 struct { 2980 struct fcoe_rport frport = { };
2986 struct fc_rport_priv rdata;
2987 struct fcoe_rport frport;
2988 } buf;
2989 int rc; 2981 int rc;
2990 2982
2991 fiph = (struct fip_header *)skb->data; 2983 fiph = (struct fip_header *)skb->data;
2992 sub = fiph->fip_subcode; 2984 sub = fiph->fip_subcode;
2993 rc = fcoe_ctlr_vlan_parse(fip, skb, &buf.rdata); 2985 rc = fcoe_ctlr_vlan_parse(fip, skb, &frport);
2994 if (rc) { 2986 if (rc) {
2995 LIBFCOE_FIP_DBG(fip, "vlan_recv vlan_parse error %d\n", rc); 2987 LIBFCOE_FIP_DBG(fip, "vlan_recv vlan_parse error %d\n", rc);
2996 goto drop; 2988 goto drop;
2997 } 2989 }
2998 mutex_lock(&fip->ctlr_mutex); 2990 mutex_lock(&fip->ctlr_mutex);
2999 if (sub == FIP_SC_VL_REQ) 2991 if (sub == FIP_SC_VL_REQ)
3000 fcoe_ctlr_vlan_disc_reply(fip, &buf.rdata); 2992 fcoe_ctlr_vlan_disc_reply(fip, &frport);
3001 mutex_unlock(&fip->ctlr_mutex); 2993 mutex_unlock(&fip->ctlr_mutex);
3002 2994
3003drop: 2995drop:
diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
index 43a6b5350775..1bb6aada93fa 100644
--- a/drivers/scsi/hpsa.c
+++ b/drivers/scsi/hpsa.c
@@ -2334,6 +2334,8 @@ static int handle_ioaccel_mode2_error(struct ctlr_info *h,
2334 case IOACCEL2_SERV_RESPONSE_COMPLETE: 2334 case IOACCEL2_SERV_RESPONSE_COMPLETE:
2335 switch (c2->error_data.status) { 2335 switch (c2->error_data.status) {
2336 case IOACCEL2_STATUS_SR_TASK_COMP_GOOD: 2336 case IOACCEL2_STATUS_SR_TASK_COMP_GOOD:
2337 if (cmd)
2338 cmd->result = 0;
2337 break; 2339 break;
2338 case IOACCEL2_STATUS_SR_TASK_COMP_CHK_COND: 2340 case IOACCEL2_STATUS_SR_TASK_COMP_CHK_COND:
2339 cmd->result |= SAM_STAT_CHECK_CONDITION; 2341 cmd->result |= SAM_STAT_CHECK_CONDITION;
@@ -2483,8 +2485,10 @@ static void process_ioaccel2_completion(struct ctlr_info *h,
2483 2485
2484 /* check for good status */ 2486 /* check for good status */
2485 if (likely(c2->error_data.serv_response == 0 && 2487 if (likely(c2->error_data.serv_response == 0 &&
2486 c2->error_data.status == 0)) 2488 c2->error_data.status == 0)) {
2489 cmd->result = 0;
2487 return hpsa_cmd_free_and_done(h, c, cmd); 2490 return hpsa_cmd_free_and_done(h, c, cmd);
2491 }
2488 2492
2489 /* 2493 /*
2490 * Any RAID offload error results in retry which will use 2494 * Any RAID offload error results in retry which will use
@@ -5654,6 +5658,12 @@ static int hpsa_scsi_queue_command(struct Scsi_Host *sh, struct scsi_cmnd *cmd)
5654 return SCSI_MLQUEUE_DEVICE_BUSY; 5658 return SCSI_MLQUEUE_DEVICE_BUSY;
5655 5659
5656 /* 5660 /*
5661 * This is necessary because the SML doesn't zero out this field during
5662 * error recovery.
5663 */
5664 cmd->result = 0;
5665
5666 /*
5657 * Call alternate submit routine for I/O accelerated commands. 5667 * Call alternate submit routine for I/O accelerated commands.
5658 * Retries always go down the normal I/O path. 5668 * Retries always go down the normal I/O path.
5659 */ 5669 */
@@ -6081,8 +6091,6 @@ static struct CommandList *cmd_tagged_alloc(struct ctlr_info *h,
6081 if (idx != h->last_collision_tag) { /* Print once per tag */ 6091 if (idx != h->last_collision_tag) { /* Print once per tag */
6082 dev_warn(&h->pdev->dev, 6092 dev_warn(&h->pdev->dev,
6083 "%s: tag collision (tag=%d)\n", __func__, idx); 6093 "%s: tag collision (tag=%d)\n", __func__, idx);
6084 if (c->scsi_cmd != NULL)
6085 scsi_print_command(c->scsi_cmd);
6086 if (scmd) 6094 if (scmd)
6087 scsi_print_command(scmd); 6095 scsi_print_command(scmd);
6088 h->last_collision_tag = idx; 6096 h->last_collision_tag = idx;
@@ -7798,7 +7806,7 @@ static void hpsa_free_pci_init(struct ctlr_info *h)
7798 hpsa_disable_interrupt_mode(h); /* pci_init 2 */ 7806 hpsa_disable_interrupt_mode(h); /* pci_init 2 */
7799 /* 7807 /*
7800 * call pci_disable_device before pci_release_regions per 7808 * call pci_disable_device before pci_release_regions per
7801 * Documentation/PCI/pci.rst 7809 * Documentation/driver-api/pci/pci.rst
7802 */ 7810 */
7803 pci_disable_device(h->pdev); /* pci_init 1 */ 7811 pci_disable_device(h->pdev); /* pci_init 1 */
7804 pci_release_regions(h->pdev); /* pci_init 2 */ 7812 pci_release_regions(h->pdev); /* pci_init 2 */
@@ -7881,7 +7889,7 @@ clean2: /* intmode+region, pci */
7881clean1: 7889clean1:
7882 /* 7890 /*
7883 * call pci_disable_device before pci_release_regions per 7891 * call pci_disable_device before pci_release_regions per
7884 * Documentation/PCI/pci.rst 7892 * Documentation/driver-api/pci/pci.rst
7885 */ 7893 */
7886 pci_disable_device(h->pdev); 7894 pci_disable_device(h->pdev);
7887 pci_release_regions(h->pdev); 7895 pci_release_regions(h->pdev);
diff --git a/drivers/scsi/ibmvscsi/ibmvfc.c b/drivers/scsi/ibmvscsi/ibmvfc.c
index acd16e0d52cf..8cdbac076a1b 100644
--- a/drivers/scsi/ibmvscsi/ibmvfc.c
+++ b/drivers/scsi/ibmvscsi/ibmvfc.c
@@ -4864,8 +4864,8 @@ static int ibmvfc_remove(struct vio_dev *vdev)
4864 4864
4865 spin_lock_irqsave(vhost->host->host_lock, flags); 4865 spin_lock_irqsave(vhost->host->host_lock, flags);
4866 ibmvfc_purge_requests(vhost, DID_ERROR); 4866 ibmvfc_purge_requests(vhost, DID_ERROR);
4867 ibmvfc_free_event_pool(vhost);
4868 spin_unlock_irqrestore(vhost->host->host_lock, flags); 4867 spin_unlock_irqrestore(vhost->host->host_lock, flags);
4868 ibmvfc_free_event_pool(vhost);
4869 4869
4870 ibmvfc_free_mem(vhost); 4870 ibmvfc_free_mem(vhost);
4871 spin_lock(&ibmvfc_driver_lock); 4871 spin_lock(&ibmvfc_driver_lock);
diff --git a/drivers/scsi/libfc/fc_rport.c b/drivers/scsi/libfc/fc_rport.c
index e0f3852fdad1..da6e97d8dc3b 100644
--- a/drivers/scsi/libfc/fc_rport.c
+++ b/drivers/scsi/libfc/fc_rport.c
@@ -128,6 +128,7 @@ EXPORT_SYMBOL(fc_rport_lookup);
128struct fc_rport_priv *fc_rport_create(struct fc_lport *lport, u32 port_id) 128struct fc_rport_priv *fc_rport_create(struct fc_lport *lport, u32 port_id)
129{ 129{
130 struct fc_rport_priv *rdata; 130 struct fc_rport_priv *rdata;
131 size_t rport_priv_size = sizeof(*rdata);
131 132
132 lockdep_assert_held(&lport->disc.disc_mutex); 133 lockdep_assert_held(&lport->disc.disc_mutex);
133 134
@@ -135,7 +136,9 @@ struct fc_rport_priv *fc_rport_create(struct fc_lport *lport, u32 port_id)
135 if (rdata) 136 if (rdata)
136 return rdata; 137 return rdata;
137 138
138 rdata = kzalloc(sizeof(*rdata) + lport->rport_priv_size, GFP_KERNEL); 139 if (lport->rport_priv_size > 0)
140 rport_priv_size = lport->rport_priv_size;
141 rdata = kzalloc(rport_priv_size, GFP_KERNEL);
139 if (!rdata) 142 if (!rdata)
140 return NULL; 143 return NULL;
141 144
diff --git a/drivers/scsi/libsas/sas_discover.c b/drivers/scsi/libsas/sas_discover.c
index abcad097ff2f..f47b4b281b14 100644
--- a/drivers/scsi/libsas/sas_discover.c
+++ b/drivers/scsi/libsas/sas_discover.c
@@ -459,6 +459,7 @@ static void sas_discover_domain(struct work_struct *work)
459 pr_notice("ATA device seen but CONFIG_SCSI_SAS_ATA=N so cannot attach\n"); 459 pr_notice("ATA device seen but CONFIG_SCSI_SAS_ATA=N so cannot attach\n");
460 /* Fall through */ 460 /* Fall through */
461#endif 461#endif
462 /* Fall through - only for the #else condition above. */
462 default: 463 default:
463 error = -ENXIO; 464 error = -ENXIO;
464 pr_err("unhandled device %d\n", dev->dev_type); 465 pr_err("unhandled device %d\n", dev->dev_type);
diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
index 2c3bb8a966e5..bade2e025ecf 100644
--- a/drivers/scsi/lpfc/lpfc.h
+++ b/drivers/scsi/lpfc/lpfc.h
@@ -824,6 +824,7 @@ struct lpfc_hba {
824 uint32_t cfg_cq_poll_threshold; 824 uint32_t cfg_cq_poll_threshold;
825 uint32_t cfg_cq_max_proc_limit; 825 uint32_t cfg_cq_max_proc_limit;
826 uint32_t cfg_fcp_cpu_map; 826 uint32_t cfg_fcp_cpu_map;
827 uint32_t cfg_fcp_mq_threshold;
827 uint32_t cfg_hdw_queue; 828 uint32_t cfg_hdw_queue;
828 uint32_t cfg_irq_chann; 829 uint32_t cfg_irq_chann;
829 uint32_t cfg_suppress_rsp; 830 uint32_t cfg_suppress_rsp;
diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c
index ea62322ffe2b..8d8c495b5b60 100644
--- a/drivers/scsi/lpfc/lpfc_attr.c
+++ b/drivers/scsi/lpfc/lpfc_attr.c
@@ -5709,6 +5709,19 @@ LPFC_ATTR_RW(nvme_embed_cmd, 1, 0, 2,
5709 "Embed NVME Command in WQE"); 5709 "Embed NVME Command in WQE");
5710 5710
5711/* 5711/*
5712 * lpfc_fcp_mq_threshold: Set the maximum number of Hardware Queues
5713 * the driver will advertise it supports to the SCSI layer.
5714 *
5715 * 0 = Set nr_hw_queues by the number of CPUs or HW queues.
5716 * 1,128 = Manually specify the maximum nr_hw_queue value to be set,
5717 *
5718 * Value range is [0,128]. Default value is 8.
5719 */
5720LPFC_ATTR_R(fcp_mq_threshold, LPFC_FCP_MQ_THRESHOLD_DEF,
5721 LPFC_FCP_MQ_THRESHOLD_MIN, LPFC_FCP_MQ_THRESHOLD_MAX,
5722 "Set the number of SCSI Queues advertised");
5723
5724/*
5712 * lpfc_hdw_queue: Set the number of Hardware Queues the driver 5725 * lpfc_hdw_queue: Set the number of Hardware Queues the driver
5713 * will advertise it supports to the NVME and SCSI layers. This also 5726 * will advertise it supports to the NVME and SCSI layers. This also
5714 * will map to the number of CQ/WQ pairs the driver will create. 5727 * will map to the number of CQ/WQ pairs the driver will create.
@@ -6030,6 +6043,7 @@ struct device_attribute *lpfc_hba_attrs[] = {
6030 &dev_attr_lpfc_cq_poll_threshold, 6043 &dev_attr_lpfc_cq_poll_threshold,
6031 &dev_attr_lpfc_cq_max_proc_limit, 6044 &dev_attr_lpfc_cq_max_proc_limit,
6032 &dev_attr_lpfc_fcp_cpu_map, 6045 &dev_attr_lpfc_fcp_cpu_map,
6046 &dev_attr_lpfc_fcp_mq_threshold,
6033 &dev_attr_lpfc_hdw_queue, 6047 &dev_attr_lpfc_hdw_queue,
6034 &dev_attr_lpfc_irq_chann, 6048 &dev_attr_lpfc_irq_chann,
6035 &dev_attr_lpfc_suppress_rsp, 6049 &dev_attr_lpfc_suppress_rsp,
@@ -7112,6 +7126,7 @@ lpfc_get_cfgparam(struct lpfc_hba *phba)
7112 /* Initialize first burst. Target vs Initiator are different. */ 7126 /* Initialize first burst. Target vs Initiator are different. */
7113 lpfc_nvme_enable_fb_init(phba, lpfc_nvme_enable_fb); 7127 lpfc_nvme_enable_fb_init(phba, lpfc_nvme_enable_fb);
7114 lpfc_nvmet_fb_size_init(phba, lpfc_nvmet_fb_size); 7128 lpfc_nvmet_fb_size_init(phba, lpfc_nvmet_fb_size);
7129 lpfc_fcp_mq_threshold_init(phba, lpfc_fcp_mq_threshold);
7115 lpfc_hdw_queue_init(phba, lpfc_hdw_queue); 7130 lpfc_hdw_queue_init(phba, lpfc_hdw_queue);
7116 lpfc_irq_chann_init(phba, lpfc_irq_chann); 7131 lpfc_irq_chann_init(phba, lpfc_irq_chann);
7117 lpfc_enable_bbcr_init(phba, lpfc_enable_bbcr); 7132 lpfc_enable_bbcr_init(phba, lpfc_enable_bbcr);
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index faf43b1d3dbe..1ac98becb5ba 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -4309,10 +4309,12 @@ lpfc_create_port(struct lpfc_hba *phba, int instance, struct device *dev)
4309 shost->max_cmd_len = 16; 4309 shost->max_cmd_len = 16;
4310 4310
4311 if (phba->sli_rev == LPFC_SLI_REV4) { 4311 if (phba->sli_rev == LPFC_SLI_REV4) {
4312 if (phba->cfg_fcp_io_sched == LPFC_FCP_SCHED_BY_HDWQ) 4312 if (!phba->cfg_fcp_mq_threshold ||
4313 shost->nr_hw_queues = phba->cfg_hdw_queue; 4313 phba->cfg_fcp_mq_threshold > phba->cfg_hdw_queue)
4314 else 4314 phba->cfg_fcp_mq_threshold = phba->cfg_hdw_queue;
4315 shost->nr_hw_queues = phba->sli4_hba.num_present_cpu; 4315
4316 shost->nr_hw_queues = min_t(int, 2 * num_possible_nodes(),
4317 phba->cfg_fcp_mq_threshold);
4316 4318
4317 shost->dma_boundary = 4319 shost->dma_boundary =
4318 phba->sli4_hba.pc_sli4_params.sge_supp_len-1; 4320 phba->sli4_hba.pc_sli4_params.sge_supp_len-1;
@@ -10776,12 +10778,31 @@ lpfc_cpu_affinity_check(struct lpfc_hba *phba, int vectors)
10776 /* This loop sets up all CPUs that are affinitized with a 10778 /* This loop sets up all CPUs that are affinitized with a
10777 * irq vector assigned to the driver. All affinitized CPUs 10779 * irq vector assigned to the driver. All affinitized CPUs
10778 * will get a link to that vectors IRQ and EQ. 10780 * will get a link to that vectors IRQ and EQ.
10781 *
10782 * NULL affinity mask handling:
10783 * If irq count is greater than one, log an error message.
10784 * If the null mask is received for the first irq, find the
10785 * first present cpu, and assign the eq index to ensure at
10786 * least one EQ is assigned.
10779 */ 10787 */
10780 for (idx = 0; idx < phba->cfg_irq_chann; idx++) { 10788 for (idx = 0; idx < phba->cfg_irq_chann; idx++) {
10781 /* Get a CPU mask for all CPUs affinitized to this vector */ 10789 /* Get a CPU mask for all CPUs affinitized to this vector */
10782 maskp = pci_irq_get_affinity(phba->pcidev, idx); 10790 maskp = pci_irq_get_affinity(phba->pcidev, idx);
10783 if (!maskp) 10791 if (!maskp) {
10784 continue; 10792 if (phba->cfg_irq_chann > 1)
10793 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10794 "3329 No affinity mask found "
10795 "for vector %d (%d)\n",
10796 idx, phba->cfg_irq_chann);
10797 if (!idx) {
10798 cpu = cpumask_first(cpu_present_mask);
10799 cpup = &phba->sli4_hba.cpu_map[cpu];
10800 cpup->eq = idx;
10801 cpup->irq = pci_irq_vector(phba->pcidev, idx);
10802 cpup->flag |= LPFC_CPU_FIRST_IRQ;
10803 }
10804 break;
10805 }
10785 10806
10786 i = 0; 10807 i = 0;
10787 /* Loop through all CPUs associated with vector idx */ 10808 /* Loop through all CPUs associated with vector idx */
diff --git a/drivers/scsi/lpfc/lpfc_sli4.h b/drivers/scsi/lpfc/lpfc_sli4.h
index 3aeca387b22a..329f7aa7e169 100644
--- a/drivers/scsi/lpfc/lpfc_sli4.h
+++ b/drivers/scsi/lpfc/lpfc_sli4.h
@@ -44,6 +44,11 @@
44#define LPFC_HBA_HDWQ_MAX 128 44#define LPFC_HBA_HDWQ_MAX 128
45#define LPFC_HBA_HDWQ_DEF 0 45#define LPFC_HBA_HDWQ_DEF 0
46 46
47/* FCP MQ queue count limiting */
48#define LPFC_FCP_MQ_THRESHOLD_MIN 0
49#define LPFC_FCP_MQ_THRESHOLD_MAX 128
50#define LPFC_FCP_MQ_THRESHOLD_DEF 8
51
47/* Common buffer size to accomidate SCSI and NVME IO buffers */ 52/* Common buffer size to accomidate SCSI and NVME IO buffers */
48#define LPFC_COMMON_IO_BUF_SZ 768 53#define LPFC_COMMON_IO_BUF_SZ 768
49 54
diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c
index b2339d04a700..f9f07935556e 100644
--- a/drivers/scsi/megaraid/megaraid_sas_base.c
+++ b/drivers/scsi/megaraid/megaraid_sas_base.c
@@ -3163,6 +3163,7 @@ fw_crash_buffer_show(struct device *cdev,
3163 (struct megasas_instance *) shost->hostdata; 3163 (struct megasas_instance *) shost->hostdata;
3164 u32 size; 3164 u32 size;
3165 unsigned long dmachunk = CRASH_DMA_BUF_SIZE; 3165 unsigned long dmachunk = CRASH_DMA_BUF_SIZE;
3166 unsigned long chunk_left_bytes;
3166 unsigned long src_addr; 3167 unsigned long src_addr;
3167 unsigned long flags; 3168 unsigned long flags;
3168 u32 buff_offset; 3169 u32 buff_offset;
@@ -3186,6 +3187,8 @@ fw_crash_buffer_show(struct device *cdev,
3186 } 3187 }
3187 3188
3188 size = (instance->fw_crash_buffer_size * dmachunk) - buff_offset; 3189 size = (instance->fw_crash_buffer_size * dmachunk) - buff_offset;
3190 chunk_left_bytes = dmachunk - (buff_offset % dmachunk);
3191 size = (size > chunk_left_bytes) ? chunk_left_bytes : size;
3189 size = (size >= PAGE_SIZE) ? (PAGE_SIZE - 1) : size; 3192 size = (size >= PAGE_SIZE) ? (PAGE_SIZE - 1) : size;
3190 3193
3191 src_addr = (unsigned long)instance->crash_buf[buff_offset / dmachunk] + 3194 src_addr = (unsigned long)instance->crash_buf[buff_offset / dmachunk] +
@@ -8763,7 +8766,7 @@ static int __init megasas_init(void)
8763 8766
8764 if ((event_log_level < MFI_EVT_CLASS_DEBUG) || 8767 if ((event_log_level < MFI_EVT_CLASS_DEBUG) ||
8765 (event_log_level > MFI_EVT_CLASS_DEAD)) { 8768 (event_log_level > MFI_EVT_CLASS_DEAD)) {
8766 printk(KERN_WARNING "megarid_sas: provided event log level is out of range, setting it to default 2(CLASS_CRITICAL), permissible range is: -2 to 4\n"); 8769 pr_warn("megaraid_sas: provided event log level is out of range, setting it to default 2(CLASS_CRITICAL), permissible range is: -2 to 4\n");
8767 event_log_level = MFI_EVT_CLASS_CRITICAL; 8770 event_log_level = MFI_EVT_CLASS_CRITICAL;
8768 } 8771 }
8769 8772
diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.c b/drivers/scsi/megaraid/megaraid_sas_fusion.c
index a32b3f0fcd15..120e3c4de8c2 100644
--- a/drivers/scsi/megaraid/megaraid_sas_fusion.c
+++ b/drivers/scsi/megaraid/megaraid_sas_fusion.c
@@ -537,7 +537,7 @@ static int megasas_create_sg_sense_fusion(struct megasas_instance *instance)
537 return 0; 537 return 0;
538} 538}
539 539
540int 540static int
541megasas_alloc_cmdlist_fusion(struct megasas_instance *instance) 541megasas_alloc_cmdlist_fusion(struct megasas_instance *instance)
542{ 542{
543 u32 max_mpt_cmd, i, j; 543 u32 max_mpt_cmd, i, j;
@@ -576,7 +576,8 @@ megasas_alloc_cmdlist_fusion(struct megasas_instance *instance)
576 576
577 return 0; 577 return 0;
578} 578}
579int 579
580static int
580megasas_alloc_request_fusion(struct megasas_instance *instance) 581megasas_alloc_request_fusion(struct megasas_instance *instance)
581{ 582{
582 struct fusion_context *fusion; 583 struct fusion_context *fusion;
@@ -657,7 +658,7 @@ retry_alloc:
657 return 0; 658 return 0;
658} 659}
659 660
660int 661static int
661megasas_alloc_reply_fusion(struct megasas_instance *instance) 662megasas_alloc_reply_fusion(struct megasas_instance *instance)
662{ 663{
663 int i, count; 664 int i, count;
@@ -734,7 +735,7 @@ megasas_alloc_reply_fusion(struct megasas_instance *instance)
734 return 0; 735 return 0;
735} 736}
736 737
737int 738static int
738megasas_alloc_rdpq_fusion(struct megasas_instance *instance) 739megasas_alloc_rdpq_fusion(struct megasas_instance *instance)
739{ 740{
740 int i, j, k, msix_count; 741 int i, j, k, msix_count;
@@ -916,7 +917,7 @@ megasas_free_reply_fusion(struct megasas_instance *instance) {
916 * and is used as SMID of the cmd. 917 * and is used as SMID of the cmd.
917 * SMID value range is from 1 to max_fw_cmds. 918 * SMID value range is from 1 to max_fw_cmds.
918 */ 919 */
919int 920static int
920megasas_alloc_cmds_fusion(struct megasas_instance *instance) 921megasas_alloc_cmds_fusion(struct megasas_instance *instance)
921{ 922{
922 int i; 923 int i;
@@ -1736,7 +1737,7 @@ static inline void megasas_free_ioc_init_cmd(struct megasas_instance *instance)
1736 * 1737 *
1737 * This is the main function for initializing firmware. 1738 * This is the main function for initializing firmware.
1738 */ 1739 */
1739u32 1740static u32
1740megasas_init_adapter_fusion(struct megasas_instance *instance) 1741megasas_init_adapter_fusion(struct megasas_instance *instance)
1741{ 1742{
1742 struct fusion_context *fusion; 1743 struct fusion_context *fusion;
@@ -1962,7 +1963,7 @@ megasas_fusion_stop_watchdog(struct megasas_instance *instance)
1962 * @ext_status : ext status of cmd returned by FW 1963 * @ext_status : ext status of cmd returned by FW
1963 */ 1964 */
1964 1965
1965void 1966static void
1966map_cmd_status(struct fusion_context *fusion, 1967map_cmd_status(struct fusion_context *fusion,
1967 struct scsi_cmnd *scmd, u8 status, u8 ext_status, 1968 struct scsi_cmnd *scmd, u8 status, u8 ext_status,
1968 u32 data_length, u8 *sense) 1969 u32 data_length, u8 *sense)
@@ -2375,7 +2376,7 @@ int megasas_make_sgl(struct megasas_instance *instance, struct scsi_cmnd *scp,
2375 * 2376 *
2376 * Used to set the PD LBA in CDB for FP IOs 2377 * Used to set the PD LBA in CDB for FP IOs
2377 */ 2378 */
2378void 2379static void
2379megasas_set_pd_lba(struct MPI2_RAID_SCSI_IO_REQUEST *io_request, u8 cdb_len, 2380megasas_set_pd_lba(struct MPI2_RAID_SCSI_IO_REQUEST *io_request, u8 cdb_len,
2380 struct IO_REQUEST_INFO *io_info, struct scsi_cmnd *scp, 2381 struct IO_REQUEST_INFO *io_info, struct scsi_cmnd *scp,
2381 struct MR_DRV_RAID_MAP_ALL *local_map_ptr, u32 ref_tag) 2382 struct MR_DRV_RAID_MAP_ALL *local_map_ptr, u32 ref_tag)
@@ -2714,7 +2715,7 @@ megasas_set_raidflag_cpu_affinity(struct fusion_context *fusion,
2714 * Prepares the io_request and chain elements (sg_frame) for IO 2715 * Prepares the io_request and chain elements (sg_frame) for IO
2715 * The IO can be for PD (Fast Path) or LD 2716 * The IO can be for PD (Fast Path) or LD
2716 */ 2717 */
2717void 2718static void
2718megasas_build_ldio_fusion(struct megasas_instance *instance, 2719megasas_build_ldio_fusion(struct megasas_instance *instance,
2719 struct scsi_cmnd *scp, 2720 struct scsi_cmnd *scp,
2720 struct megasas_cmd_fusion *cmd) 2721 struct megasas_cmd_fusion *cmd)
@@ -3211,7 +3212,7 @@ megasas_build_syspd_fusion(struct megasas_instance *instance,
3211 * Invokes helper functions to prepare request frames 3212 * Invokes helper functions to prepare request frames
3212 * and sets flags appropriate for IO/Non-IO cmd 3213 * and sets flags appropriate for IO/Non-IO cmd
3213 */ 3214 */
3214int 3215static int
3215megasas_build_io_fusion(struct megasas_instance *instance, 3216megasas_build_io_fusion(struct megasas_instance *instance,
3216 struct scsi_cmnd *scp, 3217 struct scsi_cmnd *scp,
3217 struct megasas_cmd_fusion *cmd) 3218 struct megasas_cmd_fusion *cmd)
@@ -3325,9 +3326,9 @@ megasas_get_request_descriptor(struct megasas_instance *instance, u16 index)
3325/* megasas_prepate_secondRaid1_IO 3326/* megasas_prepate_secondRaid1_IO
3326 * It prepares the raid 1 second IO 3327 * It prepares the raid 1 second IO
3327 */ 3328 */
3328void megasas_prepare_secondRaid1_IO(struct megasas_instance *instance, 3329static void megasas_prepare_secondRaid1_IO(struct megasas_instance *instance,
3329 struct megasas_cmd_fusion *cmd, 3330 struct megasas_cmd_fusion *cmd,
3330 struct megasas_cmd_fusion *r1_cmd) 3331 struct megasas_cmd_fusion *r1_cmd)
3331{ 3332{
3332 union MEGASAS_REQUEST_DESCRIPTOR_UNION *req_desc, *req_desc2 = NULL; 3333 union MEGASAS_REQUEST_DESCRIPTOR_UNION *req_desc, *req_desc2 = NULL;
3333 struct fusion_context *fusion; 3334 struct fusion_context *fusion;
diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.c b/drivers/scsi/mpt3sas/mpt3sas_base.c
index 684662888792..050c0f029ef9 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_base.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_base.c
@@ -2703,6 +2703,8 @@ _base_config_dma_addressing(struct MPT3SAS_ADAPTER *ioc, struct pci_dev *pdev)
2703{ 2703{
2704 u64 required_mask, coherent_mask; 2704 u64 required_mask, coherent_mask;
2705 struct sysinfo s; 2705 struct sysinfo s;
2706 /* Set 63 bit DMA mask for all SAS3 and SAS35 controllers */
2707 int dma_mask = (ioc->hba_mpi_version_belonged > MPI2_VERSION) ? 63 : 64;
2706 2708
2707 if (ioc->is_mcpu_endpoint) 2709 if (ioc->is_mcpu_endpoint)
2708 goto try_32bit; 2710 goto try_32bit;
@@ -2712,17 +2714,17 @@ _base_config_dma_addressing(struct MPT3SAS_ADAPTER *ioc, struct pci_dev *pdev)
2712 goto try_32bit; 2714 goto try_32bit;
2713 2715
2714 if (ioc->dma_mask) 2716 if (ioc->dma_mask)
2715 coherent_mask = DMA_BIT_MASK(64); 2717 coherent_mask = DMA_BIT_MASK(dma_mask);
2716 else 2718 else
2717 coherent_mask = DMA_BIT_MASK(32); 2719 coherent_mask = DMA_BIT_MASK(32);
2718 2720
2719 if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) || 2721 if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(dma_mask)) ||
2720 dma_set_coherent_mask(&pdev->dev, coherent_mask)) 2722 dma_set_coherent_mask(&pdev->dev, coherent_mask))
2721 goto try_32bit; 2723 goto try_32bit;
2722 2724
2723 ioc->base_add_sg_single = &_base_add_sg_single_64; 2725 ioc->base_add_sg_single = &_base_add_sg_single_64;
2724 ioc->sge_size = sizeof(Mpi2SGESimple64_t); 2726 ioc->sge_size = sizeof(Mpi2SGESimple64_t);
2725 ioc->dma_mask = 64; 2727 ioc->dma_mask = dma_mask;
2726 goto out; 2728 goto out;
2727 2729
2728 try_32bit: 2730 try_32bit:
@@ -2744,7 +2746,7 @@ static int
2744_base_change_consistent_dma_mask(struct MPT3SAS_ADAPTER *ioc, 2746_base_change_consistent_dma_mask(struct MPT3SAS_ADAPTER *ioc,
2745 struct pci_dev *pdev) 2747 struct pci_dev *pdev)
2746{ 2748{
2747 if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) { 2749 if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(ioc->dma_mask))) {
2748 if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32))) 2750 if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)))
2749 return -ENODEV; 2751 return -ENODEV;
2750 } 2752 }
@@ -4989,7 +4991,7 @@ _base_allocate_memory_pools(struct MPT3SAS_ADAPTER *ioc)
4989 total_sz += sz; 4991 total_sz += sz;
4990 } while (ioc->rdpq_array_enable && (++i < ioc->reply_queue_count)); 4992 } while (ioc->rdpq_array_enable && (++i < ioc->reply_queue_count));
4991 4993
4992 if (ioc->dma_mask == 64) { 4994 if (ioc->dma_mask > 32) {
4993 if (_base_change_consistent_dma_mask(ioc, ioc->pdev) != 0) { 4995 if (_base_change_consistent_dma_mask(ioc, ioc->pdev) != 0) {
4994 ioc_warn(ioc, "no suitable consistent DMA mask for %s\n", 4996 ioc_warn(ioc, "no suitable consistent DMA mask for %s\n",
4995 pci_name(ioc->pdev)); 4997 pci_name(ioc->pdev));
diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
index 8d560c562e9c..6b7b390b2e52 100644
--- a/drivers/scsi/qla2xxx/qla_attr.c
+++ b/drivers/scsi/qla2xxx/qla_attr.c
@@ -2956,6 +2956,8 @@ qla24xx_vport_delete(struct fc_vport *fc_vport)
2956 dma_free_coherent(&ha->pdev->dev, vha->gnl.size, vha->gnl.l, 2956 dma_free_coherent(&ha->pdev->dev, vha->gnl.size, vha->gnl.l,
2957 vha->gnl.ldma); 2957 vha->gnl.ldma);
2958 2958
2959 vha->gnl.l = NULL;
2960
2959 vfree(vha->scan.l); 2961 vfree(vha->scan.l);
2960 2962
2961 if (vha->qpair && vha->qpair->vp_idx == vha->vp_idx) { 2963 if (vha->qpair && vha->qpair->vp_idx == vha->vp_idx) {
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
index 4059655639d9..da83034d4759 100644
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -4877,7 +4877,7 @@ qla2x00_alloc_fcport(scsi_qla_host_t *vha, gfp_t flags)
4877 ql_log(ql_log_warn, vha, 0xd049, 4877 ql_log(ql_log_warn, vha, 0xd049,
4878 "Failed to allocate ct_sns request.\n"); 4878 "Failed to allocate ct_sns request.\n");
4879 kfree(fcport); 4879 kfree(fcport);
4880 fcport = NULL; 4880 return NULL;
4881 } 4881 }
4882 4882
4883 INIT_WORK(&fcport->del_work, qla24xx_delete_sess_fn); 4883 INIT_WORK(&fcport->del_work, qla24xx_delete_sess_fn);
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index 2e58cff9d200..98e60a34afd9 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -3440,6 +3440,12 @@ skip_dpc:
3440 return 0; 3440 return 0;
3441 3441
3442probe_failed: 3442probe_failed:
3443 if (base_vha->gnl.l) {
3444 dma_free_coherent(&ha->pdev->dev, base_vha->gnl.size,
3445 base_vha->gnl.l, base_vha->gnl.ldma);
3446 base_vha->gnl.l = NULL;
3447 }
3448
3443 if (base_vha->timer_active) 3449 if (base_vha->timer_active)
3444 qla2x00_stop_timer(base_vha); 3450 qla2x00_stop_timer(base_vha);
3445 base_vha->flags.online = 0; 3451 base_vha->flags.online = 0;
@@ -3673,7 +3679,7 @@ qla2x00_remove_one(struct pci_dev *pdev)
3673 if (!atomic_read(&pdev->enable_cnt)) { 3679 if (!atomic_read(&pdev->enable_cnt)) {
3674 dma_free_coherent(&ha->pdev->dev, base_vha->gnl.size, 3680 dma_free_coherent(&ha->pdev->dev, base_vha->gnl.size,
3675 base_vha->gnl.l, base_vha->gnl.ldma); 3681 base_vha->gnl.l, base_vha->gnl.ldma);
3676 3682 base_vha->gnl.l = NULL;
3677 scsi_host_put(base_vha->host); 3683 scsi_host_put(base_vha->host);
3678 kfree(ha); 3684 kfree(ha);
3679 pci_set_drvdata(pdev, NULL); 3685 pci_set_drvdata(pdev, NULL);
@@ -3713,6 +3719,8 @@ qla2x00_remove_one(struct pci_dev *pdev)
3713 dma_free_coherent(&ha->pdev->dev, 3719 dma_free_coherent(&ha->pdev->dev,
3714 base_vha->gnl.size, base_vha->gnl.l, base_vha->gnl.ldma); 3720 base_vha->gnl.size, base_vha->gnl.l, base_vha->gnl.ldma);
3715 3721
3722 base_vha->gnl.l = NULL;
3723
3716 vfree(base_vha->scan.l); 3724 vfree(base_vha->scan.l);
3717 3725
3718 if (IS_QLAFX00(ha)) 3726 if (IS_QLAFX00(ha))
@@ -4816,6 +4824,7 @@ struct scsi_qla_host *qla2x00_create_host(struct scsi_host_template *sht,
4816 "Alloc failed for scan database.\n"); 4824 "Alloc failed for scan database.\n");
4817 dma_free_coherent(&ha->pdev->dev, vha->gnl.size, 4825 dma_free_coherent(&ha->pdev->dev, vha->gnl.size,
4818 vha->gnl.l, vha->gnl.ldma); 4826 vha->gnl.l, vha->gnl.ldma);
4827 vha->gnl.l = NULL;
4819 scsi_remove_host(vha->host); 4828 scsi_remove_host(vha->host);
4820 return NULL; 4829 return NULL;
4821 } 4830 }
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index 9381171c2fc0..11e64b50497f 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -1784,8 +1784,10 @@ void __scsi_init_queue(struct Scsi_Host *shost, struct request_queue *q)
1784 blk_queue_max_integrity_segments(q, shost->sg_prot_tablesize); 1784 blk_queue_max_integrity_segments(q, shost->sg_prot_tablesize);
1785 } 1785 }
1786 1786
1787 shost->max_sectors = min_t(unsigned int, shost->max_sectors, 1787 if (dev->dma_mask) {
1788 dma_max_mapping_size(dev) << SECTOR_SHIFT); 1788 shost->max_sectors = min_t(unsigned int, shost->max_sectors,
1789 dma_max_mapping_size(dev) >> SECTOR_SHIFT);
1790 }
1789 blk_queue_max_hw_sectors(q, shost->max_sectors); 1791 blk_queue_max_hw_sectors(q, shost->max_sectors);
1790 if (shost->unchecked_isa_dma) 1792 if (shost->unchecked_isa_dma)
1791 blk_queue_bounce_limit(q, BLK_BOUNCE_ISA); 1793 blk_queue_bounce_limit(q, BLK_BOUNCE_ISA);
diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
index e274053109d0..029da74bb2f5 100644
--- a/drivers/scsi/ufs/ufshcd.c
+++ b/drivers/scsi/ufs/ufshcd.c
@@ -7062,6 +7062,9 @@ static inline int ufshcd_config_vreg_lpm(struct ufs_hba *hba,
7062static inline int ufshcd_config_vreg_hpm(struct ufs_hba *hba, 7062static inline int ufshcd_config_vreg_hpm(struct ufs_hba *hba,
7063 struct ufs_vreg *vreg) 7063 struct ufs_vreg *vreg)
7064{ 7064{
7065 if (!vreg)
7066 return 0;
7067
7065 return ufshcd_config_vreg_load(hba->dev, vreg, vreg->max_uA); 7068 return ufshcd_config_vreg_load(hba->dev, vreg, vreg->max_uA);
7066} 7069}
7067 7070
diff --git a/drivers/soc/fsl/qe/qe.c b/drivers/soc/fsl/qe/qe.c
index 62c6ba17991a..c9519e62308c 100644
--- a/drivers/soc/fsl/qe/qe.c
+++ b/drivers/soc/fsl/qe/qe.c
@@ -419,7 +419,7 @@ static void qe_upload_microcode(const void *base,
419/* 419/*
420 * Upload a microcode to the I-RAM at a specific address. 420 * Upload a microcode to the I-RAM at a specific address.
421 * 421 *
422 * See Documentation/powerpc/qe_firmware.txt for information on QE microcode 422 * See Documentation/powerpc/qe_firmware.rst for information on QE microcode
423 * uploading. 423 * uploading.
424 * 424 *
425 * Currently, only version 1 is supported, so the 'version' field must be 425 * Currently, only version 1 is supported, so the 'version' field must be
diff --git a/drivers/soc/ixp4xx/Kconfig b/drivers/soc/ixp4xx/Kconfig
index de2e62c3310a..e3eb19b85fa4 100644
--- a/drivers/soc/ixp4xx/Kconfig
+++ b/drivers/soc/ixp4xx/Kconfig
@@ -1,4 +1,6 @@
1# SPDX-License-Identifier: GPL-2.0-only 1# SPDX-License-Identifier: GPL-2.0-only
2if ARCH_IXP4XX || COMPILE_TEST
3
2menu "IXP4xx SoC drivers" 4menu "IXP4xx SoC drivers"
3 5
4config IXP4XX_QMGR 6config IXP4XX_QMGR
@@ -15,3 +17,5 @@ config IXP4XX_NPE
15 and is automatically selected by Ethernet and HSS drivers. 17 and is automatically selected by Ethernet and HSS drivers.
16 18
17endmenu 19endmenu
20
21endif
diff --git a/drivers/soc/ti/pm33xx.c b/drivers/soc/ti/pm33xx.c
index bb77c220b6f8..ccc6d53fe788 100644
--- a/drivers/soc/ti/pm33xx.c
+++ b/drivers/soc/ti/pm33xx.c
@@ -141,7 +141,7 @@ static int __init am43xx_map_gic(void)
141} 141}
142 142
143#ifdef CONFIG_SUSPEND 143#ifdef CONFIG_SUSPEND
144struct wkup_m3_wakeup_src rtc_wake_src(void) 144static struct wkup_m3_wakeup_src rtc_wake_src(void)
145{ 145{
146 u32 i; 146 u32 i;
147 147
@@ -157,7 +157,7 @@ struct wkup_m3_wakeup_src rtc_wake_src(void)
157 return rtc_ext_wakeup; 157 return rtc_ext_wakeup;
158} 158}
159 159
160int am33xx_rtc_only_idle(unsigned long wfi_flags) 160static int am33xx_rtc_only_idle(unsigned long wfi_flags)
161{ 161{
162 omap_rtc_power_off_program(&omap_rtc->dev); 162 omap_rtc_power_off_program(&omap_rtc->dev);
163 am33xx_do_wfi_sram(wfi_flags); 163 am33xx_do_wfi_sram(wfi_flags);
@@ -252,7 +252,7 @@ static int am33xx_pm_begin(suspend_state_t state)
252 if (state == PM_SUSPEND_MEM && pm_ops->check_off_mode_enable()) { 252 if (state == PM_SUSPEND_MEM && pm_ops->check_off_mode_enable()) {
253 nvmem = devm_nvmem_device_get(&omap_rtc->dev, 253 nvmem = devm_nvmem_device_get(&omap_rtc->dev,
254 "omap_rtc_scratch0"); 254 "omap_rtc_scratch0");
255 if (nvmem) 255 if (!IS_ERR(nvmem))
256 nvmem_device_write(nvmem, RTC_SCRATCH_MAGIC_REG * 4, 4, 256 nvmem_device_write(nvmem, RTC_SCRATCH_MAGIC_REG * 4, 4,
257 (void *)&rtc_magic_val); 257 (void *)&rtc_magic_val);
258 rtc_only_idle = 1; 258 rtc_only_idle = 1;
@@ -278,9 +278,12 @@ static void am33xx_pm_end(void)
278 struct nvmem_device *nvmem; 278 struct nvmem_device *nvmem;
279 279
280 nvmem = devm_nvmem_device_get(&omap_rtc->dev, "omap_rtc_scratch0"); 280 nvmem = devm_nvmem_device_get(&omap_rtc->dev, "omap_rtc_scratch0");
281 if (IS_ERR(nvmem))
282 return;
283
281 m3_ipc->ops->finish_low_power(m3_ipc); 284 m3_ipc->ops->finish_low_power(m3_ipc);
282 if (rtc_only_idle) { 285 if (rtc_only_idle) {
283 if (retrigger_irq) 286 if (retrigger_irq) {
284 /* 287 /*
285 * 32 bits of Interrupt Set-Pending correspond to 32 288 * 32 bits of Interrupt Set-Pending correspond to 32
286 * 32 interrupts. Compute the bit offset of the 289 * 32 interrupts. Compute the bit offset of the
@@ -291,8 +294,10 @@ static void am33xx_pm_end(void)
291 writel_relaxed(1 << (retrigger_irq & 31), 294 writel_relaxed(1 << (retrigger_irq & 31),
292 gic_dist_base + GIC_INT_SET_PENDING_BASE 295 gic_dist_base + GIC_INT_SET_PENDING_BASE
293 + retrigger_irq / 32 * 4); 296 + retrigger_irq / 32 * 4);
294 nvmem_device_write(nvmem, RTC_SCRATCH_MAGIC_REG * 4, 4, 297 }
295 (void *)&val); 298
299 nvmem_device_write(nvmem, RTC_SCRATCH_MAGIC_REG * 4, 4,
300 (void *)&val);
296 } 301 }
297 302
298 rtc_only_idle = 0; 303 rtc_only_idle = 0;
@@ -415,7 +420,7 @@ static int am33xx_pm_rtc_setup(void)
415 420
416 nvmem = devm_nvmem_device_get(&omap_rtc->dev, 421 nvmem = devm_nvmem_device_get(&omap_rtc->dev,
417 "omap_rtc_scratch0"); 422 "omap_rtc_scratch0");
418 if (nvmem) { 423 if (!IS_ERR(nvmem)) {
419 nvmem_device_read(nvmem, RTC_SCRATCH_MAGIC_REG * 4, 424 nvmem_device_read(nvmem, RTC_SCRATCH_MAGIC_REG * 4,
420 4, (void *)&rtc_magic_val); 425 4, (void *)&rtc_magic_val);
421 if ((rtc_magic_val & 0xffff) != RTC_REG_BOOT_MAGIC) 426 if ((rtc_magic_val & 0xffff) != RTC_REG_BOOT_MAGIC)
diff --git a/drivers/soundwire/Kconfig b/drivers/soundwire/Kconfig
index 3a01cfd70fdc..f518273cfbe3 100644
--- a/drivers/soundwire/Kconfig
+++ b/drivers/soundwire/Kconfig
@@ -4,7 +4,7 @@
4# 4#
5 5
6menuconfig SOUNDWIRE 6menuconfig SOUNDWIRE
7 bool "SoundWire support" 7 tristate "SoundWire support"
8 help 8 help
9 SoundWire is a 2-Pin interface with data and clock line ratified 9 SoundWire is a 2-Pin interface with data and clock line ratified
10 by the MIPI Alliance. SoundWire is used for transporting data 10 by the MIPI Alliance. SoundWire is used for transporting data
@@ -17,17 +17,12 @@ if SOUNDWIRE
17 17
18comment "SoundWire Devices" 18comment "SoundWire Devices"
19 19
20config SOUNDWIRE_BUS
21 tristate
22 select REGMAP_SOUNDWIRE
23
24config SOUNDWIRE_CADENCE 20config SOUNDWIRE_CADENCE
25 tristate 21 tristate
26 22
27config SOUNDWIRE_INTEL 23config SOUNDWIRE_INTEL
28 tristate "Intel SoundWire Master driver" 24 tristate "Intel SoundWire Master driver"
29 select SOUNDWIRE_CADENCE 25 select SOUNDWIRE_CADENCE
30 select SOUNDWIRE_BUS
31 depends on X86 && ACPI && SND_SOC 26 depends on X86 && ACPI && SND_SOC
32 help 27 help
33 SoundWire Intel Master driver. 28 SoundWire Intel Master driver.
diff --git a/drivers/soundwire/Makefile b/drivers/soundwire/Makefile
index fd99a831b92a..45b7e5001653 100644
--- a/drivers/soundwire/Makefile
+++ b/drivers/soundwire/Makefile
@@ -5,7 +5,7 @@
5 5
6#Bus Objs 6#Bus Objs
7soundwire-bus-objs := bus_type.o bus.o slave.o mipi_disco.o stream.o 7soundwire-bus-objs := bus_type.o bus.o slave.o mipi_disco.o stream.o
8obj-$(CONFIG_SOUNDWIRE_BUS) += soundwire-bus.o 8obj-$(CONFIG_SOUNDWIRE) += soundwire-bus.o
9 9
10#Cadence Objs 10#Cadence Objs
11soundwire-cadence-objs := cadence_master.o 11soundwire-cadence-objs := cadence_master.o
diff --git a/drivers/soundwire/cadence_master.c b/drivers/soundwire/cadence_master.c
index ff4badc9b3de..60e8bdee5c75 100644
--- a/drivers/soundwire/cadence_master.c
+++ b/drivers/soundwire/cadence_master.c
@@ -81,8 +81,8 @@
81 81
82#define CDNS_MCP_INTSET 0x4C 82#define CDNS_MCP_INTSET 0x4C
83 83
84#define CDNS_SDW_SLAVE_STAT 0x50 84#define CDNS_MCP_SLAVE_STAT 0x50
85#define CDNS_MCP_SLAVE_STAT_MASK BIT(1, 0) 85#define CDNS_MCP_SLAVE_STAT_MASK GENMASK(1, 0)
86 86
87#define CDNS_MCP_SLAVE_INTSTAT0 0x54 87#define CDNS_MCP_SLAVE_INTSTAT0 0x54
88#define CDNS_MCP_SLAVE_INTSTAT1 0x58 88#define CDNS_MCP_SLAVE_INTSTAT1 0x58
@@ -96,8 +96,8 @@
96#define CDNS_MCP_SLAVE_INTMASK0 0x5C 96#define CDNS_MCP_SLAVE_INTMASK0 0x5C
97#define CDNS_MCP_SLAVE_INTMASK1 0x60 97#define CDNS_MCP_SLAVE_INTMASK1 0x60
98 98
99#define CDNS_MCP_SLAVE_INTMASK0_MASK GENMASK(30, 0) 99#define CDNS_MCP_SLAVE_INTMASK0_MASK GENMASK(31, 0)
100#define CDNS_MCP_SLAVE_INTMASK1_MASK GENMASK(16, 0) 100#define CDNS_MCP_SLAVE_INTMASK1_MASK GENMASK(15, 0)
101 101
102#define CDNS_MCP_PORT_INTSTAT 0x64 102#define CDNS_MCP_PORT_INTSTAT 0x64
103#define CDNS_MCP_PDI_STAT 0x6C 103#define CDNS_MCP_PDI_STAT 0x6C
diff --git a/drivers/spi/spi-bcm2835.c b/drivers/spi/spi-bcm2835.c
index 6f243a90c844..840b1b8ff3dc 100644
--- a/drivers/spi/spi-bcm2835.c
+++ b/drivers/spi/spi-bcm2835.c
@@ -834,7 +834,8 @@ static int bcm2835_spi_transfer_one(struct spi_controller *ctlr,
834 bcm2835_wr(bs, BCM2835_SPI_CLK, cdiv); 834 bcm2835_wr(bs, BCM2835_SPI_CLK, cdiv);
835 835
836 /* handle all the 3-wire mode */ 836 /* handle all the 3-wire mode */
837 if ((spi->mode & SPI_3WIRE) && (tfr->rx_buf)) 837 if (spi->mode & SPI_3WIRE && tfr->rx_buf &&
838 tfr->rx_buf != ctlr->dummy_rx)
838 cs |= BCM2835_SPI_CS_REN; 839 cs |= BCM2835_SPI_CS_REN;
839 else 840 else
840 cs &= ~BCM2835_SPI_CS_REN; 841 cs &= ~BCM2835_SPI_CS_REN;
diff --git a/drivers/spi/spi-fsl-qspi.c b/drivers/spi/spi-fsl-qspi.c
index 41a49b93ca60..448c00e4065b 100644
--- a/drivers/spi/spi-fsl-qspi.c
+++ b/drivers/spi/spi-fsl-qspi.c
@@ -206,7 +206,7 @@ static const struct fsl_qspi_devtype_data imx6sx_data = {
206}; 206};
207 207
208static const struct fsl_qspi_devtype_data imx7d_data = { 208static const struct fsl_qspi_devtype_data imx7d_data = {
209 .rxfifo = SZ_512, 209 .rxfifo = SZ_128,
210 .txfifo = SZ_512, 210 .txfifo = SZ_512,
211 .ahb_buf_size = SZ_1K, 211 .ahb_buf_size = SZ_1K,
212 .quirks = QUADSPI_QUIRK_TKT253890 | QUADSPI_QUIRK_4X_INT_CLK, 212 .quirks = QUADSPI_QUIRK_TKT253890 | QUADSPI_QUIRK_4X_INT_CLK,
diff --git a/drivers/spi/spi-gpio.c b/drivers/spi/spi-gpio.c
index eca9d52ecf65..9eb82150666e 100644
--- a/drivers/spi/spi-gpio.c
+++ b/drivers/spi/spi-gpio.c
@@ -410,6 +410,12 @@ static int spi_gpio_probe(struct platform_device *pdev)
410 410
411 bb = &spi_gpio->bitbang; 411 bb = &spi_gpio->bitbang;
412 bb->master = master; 412 bb->master = master;
413 /*
414 * There is some additional business, apart from driving the CS GPIO
415 * line, that we need to do on selection. This makes the local
416 * callback for chipselect always get called.
417 */
418 master->flags |= SPI_MASTER_GPIO_SS;
413 bb->chipselect = spi_gpio_chipselect; 419 bb->chipselect = spi_gpio_chipselect;
414 bb->set_line_direction = spi_gpio_set_direction; 420 bb->set_line_direction = spi_gpio_set_direction;
415 421
diff --git a/drivers/spi/spi-pxa2xx.c b/drivers/spi/spi-pxa2xx.c
index fc7ab4b26880..bb6a14d1ab0f 100644
--- a/drivers/spi/spi-pxa2xx.c
+++ b/drivers/spi/spi-pxa2xx.c
@@ -1457,6 +1457,14 @@ static const struct pci_device_id pxa2xx_spi_pci_compound_match[] = {
1457 { PCI_VDEVICE(INTEL, 0x02aa), LPSS_CNL_SSP }, 1457 { PCI_VDEVICE(INTEL, 0x02aa), LPSS_CNL_SSP },
1458 { PCI_VDEVICE(INTEL, 0x02ab), LPSS_CNL_SSP }, 1458 { PCI_VDEVICE(INTEL, 0x02ab), LPSS_CNL_SSP },
1459 { PCI_VDEVICE(INTEL, 0x02fb), LPSS_CNL_SSP }, 1459 { PCI_VDEVICE(INTEL, 0x02fb), LPSS_CNL_SSP },
1460 /* TGL-LP */
1461 { PCI_VDEVICE(INTEL, 0xa0aa), LPSS_CNL_SSP },
1462 { PCI_VDEVICE(INTEL, 0xa0ab), LPSS_CNL_SSP },
1463 { PCI_VDEVICE(INTEL, 0xa0de), LPSS_CNL_SSP },
1464 { PCI_VDEVICE(INTEL, 0xa0df), LPSS_CNL_SSP },
1465 { PCI_VDEVICE(INTEL, 0xa0fb), LPSS_CNL_SSP },
1466 { PCI_VDEVICE(INTEL, 0xa0fd), LPSS_CNL_SSP },
1467 { PCI_VDEVICE(INTEL, 0xa0fe), LPSS_CNL_SSP },
1460 { }, 1468 { },
1461}; 1469};
1462 1470
@@ -1831,14 +1839,16 @@ static int pxa2xx_spi_probe(struct platform_device *pdev)
1831 status = devm_spi_register_controller(&pdev->dev, controller); 1839 status = devm_spi_register_controller(&pdev->dev, controller);
1832 if (status != 0) { 1840 if (status != 0) {
1833 dev_err(&pdev->dev, "problem registering spi controller\n"); 1841 dev_err(&pdev->dev, "problem registering spi controller\n");
1834 goto out_error_clock_enabled; 1842 goto out_error_pm_runtime_enabled;
1835 } 1843 }
1836 1844
1837 return status; 1845 return status;
1838 1846
1839out_error_clock_enabled: 1847out_error_pm_runtime_enabled:
1840 pm_runtime_put_noidle(&pdev->dev); 1848 pm_runtime_put_noidle(&pdev->dev);
1841 pm_runtime_disable(&pdev->dev); 1849 pm_runtime_disable(&pdev->dev);
1850
1851out_error_clock_enabled:
1842 clk_disable_unprepare(ssp->clk); 1852 clk_disable_unprepare(ssp->clk);
1843 1853
1844out_error_dma_irq_alloc: 1854out_error_dma_irq_alloc:
diff --git a/drivers/staging/android/ion/ion_page_pool.c b/drivers/staging/android/ion/ion_page_pool.c
index fd4995fb676e..f85ec5b16b65 100644
--- a/drivers/staging/android/ion/ion_page_pool.c
+++ b/drivers/staging/android/ion/ion_page_pool.c
@@ -8,11 +8,14 @@
8#include <linux/list.h> 8#include <linux/list.h>
9#include <linux/slab.h> 9#include <linux/slab.h>
10#include <linux/swap.h> 10#include <linux/swap.h>
11#include <linux/sched/signal.h>
11 12
12#include "ion.h" 13#include "ion.h"
13 14
14static inline struct page *ion_page_pool_alloc_pages(struct ion_page_pool *pool) 15static inline struct page *ion_page_pool_alloc_pages(struct ion_page_pool *pool)
15{ 16{
17 if (fatal_signal_pending(current))
18 return NULL;
16 return alloc_pages(pool->gfp_mask, pool->order); 19 return alloc_pages(pool->gfp_mask, pool->order);
17} 20}
18 21
diff --git a/drivers/staging/comedi/drivers/dt3000.c b/drivers/staging/comedi/drivers/dt3000.c
index 2edf3ee91300..caf4d4df4bd3 100644
--- a/drivers/staging/comedi/drivers/dt3000.c
+++ b/drivers/staging/comedi/drivers/dt3000.c
@@ -342,9 +342,9 @@ static irqreturn_t dt3k_interrupt(int irq, void *d)
342static int dt3k_ns_to_timer(unsigned int timer_base, unsigned int *nanosec, 342static int dt3k_ns_to_timer(unsigned int timer_base, unsigned int *nanosec,
343 unsigned int flags) 343 unsigned int flags)
344{ 344{
345 int divider, base, prescale; 345 unsigned int divider, base, prescale;
346 346
347 /* This function needs improvment */ 347 /* This function needs improvement */
348 /* Don't know if divider==0 works. */ 348 /* Don't know if divider==0 works. */
349 349
350 for (prescale = 0; prescale < 16; prescale++) { 350 for (prescale = 0; prescale < 16; prescale++) {
@@ -358,7 +358,7 @@ static int dt3k_ns_to_timer(unsigned int timer_base, unsigned int *nanosec,
358 divider = (*nanosec) / base; 358 divider = (*nanosec) / base;
359 break; 359 break;
360 case CMDF_ROUND_UP: 360 case CMDF_ROUND_UP:
361 divider = (*nanosec) / base; 361 divider = DIV_ROUND_UP(*nanosec, base);
362 break; 362 break;
363 } 363 }
364 if (divider < 65536) { 364 if (divider < 65536) {
@@ -368,7 +368,7 @@ static int dt3k_ns_to_timer(unsigned int timer_base, unsigned int *nanosec,
368 } 368 }
369 369
370 prescale = 15; 370 prescale = 15;
371 base = timer_base * (1 << prescale); 371 base = timer_base * (prescale + 1);
372 divider = 65535; 372 divider = 65535;
373 *nanosec = divider * base; 373 *nanosec = divider * base;
374 return (prescale << 16) | (divider); 374 return (prescale << 16) | (divider);
diff --git a/drivers/staging/fbtft/fb_bd663474.c b/drivers/staging/fbtft/fb_bd663474.c
index b6c6d66e4eb1..e2c7646588f8 100644
--- a/drivers/staging/fbtft/fb_bd663474.c
+++ b/drivers/staging/fbtft/fb_bd663474.c
@@ -24,7 +24,7 @@
24 24
25static int init_display(struct fbtft_par *par) 25static int init_display(struct fbtft_par *par)
26{ 26{
27 if (!par->gpio.cs) 27 if (par->gpio.cs)
28 gpiod_set_value(par->gpio.cs, 0); /* Activate chip */ 28 gpiod_set_value(par->gpio.cs, 0); /* Activate chip */
29 29
30 par->fbtftops.reset(par); 30 par->fbtftops.reset(par);
diff --git a/drivers/staging/fbtft/fb_ili9163.c b/drivers/staging/fbtft/fb_ili9163.c
index d609a2b67db9..fd32376700e2 100644
--- a/drivers/staging/fbtft/fb_ili9163.c
+++ b/drivers/staging/fbtft/fb_ili9163.c
@@ -77,7 +77,7 @@ static int init_display(struct fbtft_par *par)
77{ 77{
78 par->fbtftops.reset(par); 78 par->fbtftops.reset(par);
79 79
80 if (!par->gpio.cs) 80 if (par->gpio.cs)
81 gpiod_set_value(par->gpio.cs, 0); /* Activate chip */ 81 gpiod_set_value(par->gpio.cs, 0); /* Activate chip */
82 82
83 write_reg(par, MIPI_DCS_SOFT_RESET); /* software reset */ 83 write_reg(par, MIPI_DCS_SOFT_RESET); /* software reset */
diff --git a/drivers/staging/fbtft/fb_ili9325.c b/drivers/staging/fbtft/fb_ili9325.c
index b090e7ab6fdd..85e54a10ed72 100644
--- a/drivers/staging/fbtft/fb_ili9325.c
+++ b/drivers/staging/fbtft/fb_ili9325.c
@@ -85,7 +85,7 @@ static int init_display(struct fbtft_par *par)
85{ 85{
86 par->fbtftops.reset(par); 86 par->fbtftops.reset(par);
87 87
88 if (!par->gpio.cs) 88 if (par->gpio.cs)
89 gpiod_set_value(par->gpio.cs, 0); /* Activate chip */ 89 gpiod_set_value(par->gpio.cs, 0); /* Activate chip */
90 90
91 bt &= 0x07; 91 bt &= 0x07;
diff --git a/drivers/staging/fbtft/fb_s6d1121.c b/drivers/staging/fbtft/fb_s6d1121.c
index b3d0701880fe..5a129b1352cc 100644
--- a/drivers/staging/fbtft/fb_s6d1121.c
+++ b/drivers/staging/fbtft/fb_s6d1121.c
@@ -29,7 +29,7 @@ static int init_display(struct fbtft_par *par)
29{ 29{
30 par->fbtftops.reset(par); 30 par->fbtftops.reset(par);
31 31
32 if (!par->gpio.cs) 32 if (par->gpio.cs)
33 gpiod_set_value(par->gpio.cs, 0); /* Activate chip */ 33 gpiod_set_value(par->gpio.cs, 0); /* Activate chip */
34 34
35 /* Initialization sequence from Lib_UTFT */ 35 /* Initialization sequence from Lib_UTFT */
diff --git a/drivers/staging/fbtft/fb_ssd1289.c b/drivers/staging/fbtft/fb_ssd1289.c
index bbf75f795234..88a5b6925901 100644
--- a/drivers/staging/fbtft/fb_ssd1289.c
+++ b/drivers/staging/fbtft/fb_ssd1289.c
@@ -28,7 +28,7 @@ static int init_display(struct fbtft_par *par)
28{ 28{
29 par->fbtftops.reset(par); 29 par->fbtftops.reset(par);
30 30
31 if (!par->gpio.cs) 31 if (par->gpio.cs)
32 gpiod_set_value(par->gpio.cs, 0); /* Activate chip */ 32 gpiod_set_value(par->gpio.cs, 0); /* Activate chip */
33 33
34 write_reg(par, 0x00, 0x0001); 34 write_reg(par, 0x00, 0x0001);
diff --git a/drivers/staging/fbtft/fb_ssd1331.c b/drivers/staging/fbtft/fb_ssd1331.c
index 4cfe9f8535d0..37622c9462aa 100644
--- a/drivers/staging/fbtft/fb_ssd1331.c
+++ b/drivers/staging/fbtft/fb_ssd1331.c
@@ -81,7 +81,7 @@ static void write_reg8_bus8(struct fbtft_par *par, int len, ...)
81 va_start(args, len); 81 va_start(args, len);
82 82
83 *buf = (u8)va_arg(args, unsigned int); 83 *buf = (u8)va_arg(args, unsigned int);
84 if (!par->gpio.dc) 84 if (par->gpio.dc)
85 gpiod_set_value(par->gpio.dc, 0); 85 gpiod_set_value(par->gpio.dc, 0);
86 ret = par->fbtftops.write(par, par->buf, sizeof(u8)); 86 ret = par->fbtftops.write(par, par->buf, sizeof(u8));
87 if (ret < 0) { 87 if (ret < 0) {
@@ -104,7 +104,7 @@ static void write_reg8_bus8(struct fbtft_par *par, int len, ...)
104 return; 104 return;
105 } 105 }
106 } 106 }
107 if (!par->gpio.dc) 107 if (par->gpio.dc)
108 gpiod_set_value(par->gpio.dc, 1); 108 gpiod_set_value(par->gpio.dc, 1);
109 va_end(args); 109 va_end(args);
110} 110}
diff --git a/drivers/staging/fbtft/fb_upd161704.c b/drivers/staging/fbtft/fb_upd161704.c
index 564a38e34440..c77832ae5e5b 100644
--- a/drivers/staging/fbtft/fb_upd161704.c
+++ b/drivers/staging/fbtft/fb_upd161704.c
@@ -26,7 +26,7 @@ static int init_display(struct fbtft_par *par)
26{ 26{
27 par->fbtftops.reset(par); 27 par->fbtftops.reset(par);
28 28
29 if (!par->gpio.cs) 29 if (par->gpio.cs)
30 gpiod_set_value(par->gpio.cs, 0); /* Activate chip */ 30 gpiod_set_value(par->gpio.cs, 0); /* Activate chip */
31 31
32 /* Initialization sequence from Lib_UTFT */ 32 /* Initialization sequence from Lib_UTFT */
diff --git a/drivers/staging/fbtft/fbtft-bus.c b/drivers/staging/fbtft/fbtft-bus.c
index 2ea814d0dca5..63c65dd67b17 100644
--- a/drivers/staging/fbtft/fbtft-bus.c
+++ b/drivers/staging/fbtft/fbtft-bus.c
@@ -135,7 +135,7 @@ int fbtft_write_vmem16_bus8(struct fbtft_par *par, size_t offset, size_t len)
135 remain = len / 2; 135 remain = len / 2;
136 vmem16 = (u16 *)(par->info->screen_buffer + offset); 136 vmem16 = (u16 *)(par->info->screen_buffer + offset);
137 137
138 if (!par->gpio.dc) 138 if (par->gpio.dc)
139 gpiod_set_value(par->gpio.dc, 1); 139 gpiod_set_value(par->gpio.dc, 1);
140 140
141 /* non buffered write */ 141 /* non buffered write */
diff --git a/drivers/staging/fbtft/fbtft-core.c b/drivers/staging/fbtft/fbtft-core.c
index 7cbc1bdd2d8a..cf5700a2ea66 100644
--- a/drivers/staging/fbtft/fbtft-core.c
+++ b/drivers/staging/fbtft/fbtft-core.c
@@ -76,21 +76,18 @@ static int fbtft_request_one_gpio(struct fbtft_par *par,
76 struct gpio_desc **gpiop) 76 struct gpio_desc **gpiop)
77{ 77{
78 struct device *dev = par->info->device; 78 struct device *dev = par->info->device;
79 struct device_node *node = dev->of_node;
80 int ret = 0; 79 int ret = 0;
81 80
82 if (of_find_property(node, name, NULL)) { 81 *gpiop = devm_gpiod_get_index_optional(dev, name, index,
83 *gpiop = devm_gpiod_get_index(dev, dev->driver->name, index, 82 GPIOD_OUT_HIGH);
84 GPIOD_OUT_HIGH); 83 if (IS_ERR(*gpiop)) {
85 if (IS_ERR(*gpiop)) { 84 ret = PTR_ERR(*gpiop);
86 ret = PTR_ERR(*gpiop); 85 dev_err(dev,
87 dev_err(dev, 86 "Failed to request %s GPIO: %d\n", name, ret);
88 "Failed to request %s GPIO:%d\n", name, ret); 87 return ret;
89 return ret;
90 }
91 fbtft_par_dbg(DEBUG_REQUEST_GPIOS, par, "%s: '%s' GPIO\n",
92 __func__, name);
93 } 88 }
89 fbtft_par_dbg(DEBUG_REQUEST_GPIOS, par, "%s: '%s' GPIO\n",
90 __func__, name);
94 91
95 return ret; 92 return ret;
96} 93}
@@ -103,34 +100,34 @@ static int fbtft_request_gpios_dt(struct fbtft_par *par)
103 if (!par->info->device->of_node) 100 if (!par->info->device->of_node)
104 return -EINVAL; 101 return -EINVAL;
105 102
106 ret = fbtft_request_one_gpio(par, "reset-gpios", 0, &par->gpio.reset); 103 ret = fbtft_request_one_gpio(par, "reset", 0, &par->gpio.reset);
107 if (ret) 104 if (ret)
108 return ret; 105 return ret;
109 ret = fbtft_request_one_gpio(par, "dc-gpios", 0, &par->gpio.dc); 106 ret = fbtft_request_one_gpio(par, "dc", 0, &par->gpio.dc);
110 if (ret) 107 if (ret)
111 return ret; 108 return ret;
112 ret = fbtft_request_one_gpio(par, "rd-gpios", 0, &par->gpio.rd); 109 ret = fbtft_request_one_gpio(par, "rd", 0, &par->gpio.rd);
113 if (ret) 110 if (ret)
114 return ret; 111 return ret;
115 ret = fbtft_request_one_gpio(par, "wr-gpios", 0, &par->gpio.wr); 112 ret = fbtft_request_one_gpio(par, "wr", 0, &par->gpio.wr);
116 if (ret) 113 if (ret)
117 return ret; 114 return ret;
118 ret = fbtft_request_one_gpio(par, "cs-gpios", 0, &par->gpio.cs); 115 ret = fbtft_request_one_gpio(par, "cs", 0, &par->gpio.cs);
119 if (ret) 116 if (ret)
120 return ret; 117 return ret;
121 ret = fbtft_request_one_gpio(par, "latch-gpios", 0, &par->gpio.latch); 118 ret = fbtft_request_one_gpio(par, "latch", 0, &par->gpio.latch);
122 if (ret) 119 if (ret)
123 return ret; 120 return ret;
124 for (i = 0; i < 16; i++) { 121 for (i = 0; i < 16; i++) {
125 ret = fbtft_request_one_gpio(par, "db-gpios", i, 122 ret = fbtft_request_one_gpio(par, "db", i,
126 &par->gpio.db[i]); 123 &par->gpio.db[i]);
127 if (ret) 124 if (ret)
128 return ret; 125 return ret;
129 ret = fbtft_request_one_gpio(par, "led-gpios", i, 126 ret = fbtft_request_one_gpio(par, "led", i,
130 &par->gpio.led[i]); 127 &par->gpio.led[i]);
131 if (ret) 128 if (ret)
132 return ret; 129 return ret;
133 ret = fbtft_request_one_gpio(par, "aux-gpios", i, 130 ret = fbtft_request_one_gpio(par, "aux", i,
134 &par->gpio.aux[i]); 131 &par->gpio.aux[i]);
135 if (ret) 132 if (ret)
136 return ret; 133 return ret;
@@ -234,9 +231,9 @@ static void fbtft_reset(struct fbtft_par *par)
234 if (!par->gpio.reset) 231 if (!par->gpio.reset)
235 return; 232 return;
236 fbtft_par_dbg(DEBUG_RESET, par, "%s()\n", __func__); 233 fbtft_par_dbg(DEBUG_RESET, par, "%s()\n", __func__);
237 gpiod_set_value_cansleep(par->gpio.reset, 0);
238 usleep_range(20, 40);
239 gpiod_set_value_cansleep(par->gpio.reset, 1); 234 gpiod_set_value_cansleep(par->gpio.reset, 1);
235 usleep_range(20, 40);
236 gpiod_set_value_cansleep(par->gpio.reset, 0);
240 msleep(120); 237 msleep(120);
241} 238}
242 239
@@ -921,7 +918,7 @@ static int fbtft_init_display_dt(struct fbtft_par *par)
921 return -EINVAL; 918 return -EINVAL;
922 919
923 par->fbtftops.reset(par); 920 par->fbtftops.reset(par);
924 if (!par->gpio.cs) 921 if (par->gpio.cs)
925 gpiod_set_value(par->gpio.cs, 0); /* Activate chip */ 922 gpiod_set_value(par->gpio.cs, 0); /* Activate chip */
926 923
927 while (p) { 924 while (p) {
@@ -1012,7 +1009,7 @@ int fbtft_init_display(struct fbtft_par *par)
1012 } 1009 }
1013 1010
1014 par->fbtftops.reset(par); 1011 par->fbtftops.reset(par);
1015 if (!par->gpio.cs) 1012 if (par->gpio.cs)
1016 gpiod_set_value(par->gpio.cs, 0); /* Activate chip */ 1013 gpiod_set_value(par->gpio.cs, 0); /* Activate chip */
1017 1014
1018 i = 0; 1015 i = 0;
diff --git a/drivers/staging/gasket/apex_driver.c b/drivers/staging/gasket/apex_driver.c
index 2be45ee9d061..464648ee2036 100644
--- a/drivers/staging/gasket/apex_driver.c
+++ b/drivers/staging/gasket/apex_driver.c
@@ -532,7 +532,7 @@ static ssize_t sysfs_show(struct device *device, struct device_attribute *attr,
532 break; 532 break;
533 case ATTR_KERNEL_HIB_SIMPLE_PAGE_TABLE_SIZE: 533 case ATTR_KERNEL_HIB_SIMPLE_PAGE_TABLE_SIZE:
534 ret = scnprintf(buf, PAGE_SIZE, "%u\n", 534 ret = scnprintf(buf, PAGE_SIZE, "%u\n",
535 gasket_page_table_num_entries( 535 gasket_page_table_num_simple_entries(
536 gasket_dev->page_table[0])); 536 gasket_dev->page_table[0]));
537 break; 537 break;
538 case ATTR_KERNEL_HIB_NUM_ACTIVE_PAGES: 538 case ATTR_KERNEL_HIB_NUM_ACTIVE_PAGES:
diff --git a/drivers/staging/unisys/visornic/visornic_main.c b/drivers/staging/unisys/visornic/visornic_main.c
index 9d4f1dab0968..40dd573e73c3 100644
--- a/drivers/staging/unisys/visornic/visornic_main.c
+++ b/drivers/staging/unisys/visornic/visornic_main.c
@@ -1750,7 +1750,8 @@ static int visornic_poll(struct napi_struct *napi, int budget)
1750} 1750}
1751 1751
1752/* poll_for_irq - checks the status of the response queue 1752/* poll_for_irq - checks the status of the response queue
1753 * @v: Void pointer to the visronic devdata struct. 1753 * @t: pointer to the 'struct timer_list' from which we can retrieve the
1754 * the visornic devdata struct.
1754 * 1755 *
1755 * Main function of the vnic_incoming thread. Periodically check the response 1756 * Main function of the vnic_incoming thread. Periodically check the response
1756 * queue and drain it if needed. 1757 * queue and drain it if needed.
diff --git a/drivers/staging/wilc1000/wilc_wfi_cfgoperations.c b/drivers/staging/wilc1000/wilc_wfi_cfgoperations.c
index d72fdd333050..736eedef23b6 100644
--- a/drivers/staging/wilc1000/wilc_wfi_cfgoperations.c
+++ b/drivers/staging/wilc1000/wilc_wfi_cfgoperations.c
@@ -1969,6 +1969,7 @@ void wilc_deinit_host_int(struct net_device *net)
1969 1969
1970 priv->p2p_listen_state = false; 1970 priv->p2p_listen_state = false;
1971 1971
1972 flush_workqueue(vif->wilc->hif_workqueue);
1972 mutex_destroy(&priv->scan_req_lock); 1973 mutex_destroy(&priv->scan_req_lock);
1973 ret = wilc_deinit(vif); 1974 ret = wilc_deinit(vif);
1974 1975
diff --git a/drivers/target/iscsi/cxgbit/cxgbit_cm.c b/drivers/target/iscsi/cxgbit/cxgbit_cm.c
index 22dd4c457d6a..c70caf4ea490 100644
--- a/drivers/target/iscsi/cxgbit/cxgbit_cm.c
+++ b/drivers/target/iscsi/cxgbit/cxgbit_cm.c
@@ -875,10 +875,12 @@ static u8 cxgbit_get_iscsi_dcb_priority(struct net_device *ndev, u16 local_port)
875 return 0; 875 return 0;
876 876
877 if (caps & DCB_CAP_DCBX_VER_IEEE) { 877 if (caps & DCB_CAP_DCBX_VER_IEEE) {
878 iscsi_dcb_app.selector = IEEE_8021QAZ_APP_SEL_ANY; 878 iscsi_dcb_app.selector = IEEE_8021QAZ_APP_SEL_STREAM;
879
880 ret = dcb_ieee_getapp_mask(ndev, &iscsi_dcb_app); 879 ret = dcb_ieee_getapp_mask(ndev, &iscsi_dcb_app);
881 880 if (!ret) {
881 iscsi_dcb_app.selector = IEEE_8021QAZ_APP_SEL_ANY;
882 ret = dcb_ieee_getapp_mask(ndev, &iscsi_dcb_app);
883 }
882 } else if (caps & DCB_CAP_DCBX_VER_CEE) { 884 } else if (caps & DCB_CAP_DCBX_VER_CEE) {
883 iscsi_dcb_app.selector = DCB_APP_IDTYPE_PORTNUM; 885 iscsi_dcb_app.selector = DCB_APP_IDTYPE_PORTNUM;
884 886
diff --git a/drivers/target/iscsi/cxgbit/cxgbit_main.c b/drivers/target/iscsi/cxgbit/cxgbit_main.c
index 343b129c2cfa..e877b917c15f 100644
--- a/drivers/target/iscsi/cxgbit/cxgbit_main.c
+++ b/drivers/target/iscsi/cxgbit/cxgbit_main.c
@@ -589,7 +589,8 @@ static void cxgbit_dcb_workfn(struct work_struct *work)
589 iscsi_app = &dcb_work->dcb_app; 589 iscsi_app = &dcb_work->dcb_app;
590 590
591 if (iscsi_app->dcbx & DCB_CAP_DCBX_VER_IEEE) { 591 if (iscsi_app->dcbx & DCB_CAP_DCBX_VER_IEEE) {
592 if (iscsi_app->app.selector != IEEE_8021QAZ_APP_SEL_ANY) 592 if ((iscsi_app->app.selector != IEEE_8021QAZ_APP_SEL_STREAM) &&
593 (iscsi_app->app.selector != IEEE_8021QAZ_APP_SEL_ANY))
593 goto out; 594 goto out;
594 595
595 priority = iscsi_app->app.priority; 596 priority = iscsi_app->app.priority;
diff --git a/drivers/target/target_core_user.c b/drivers/target/target_core_user.c
index 04eda111920e..661bb9358364 100644
--- a/drivers/target/target_core_user.c
+++ b/drivers/target/target_core_user.c
@@ -1132,14 +1132,16 @@ static void tcmu_handle_completion(struct tcmu_cmd *cmd, struct tcmu_cmd_entry *
1132 struct se_cmd *se_cmd = cmd->se_cmd; 1132 struct se_cmd *se_cmd = cmd->se_cmd;
1133 struct tcmu_dev *udev = cmd->tcmu_dev; 1133 struct tcmu_dev *udev = cmd->tcmu_dev;
1134 bool read_len_valid = false; 1134 bool read_len_valid = false;
1135 uint32_t read_len = se_cmd->data_length; 1135 uint32_t read_len;
1136 1136
1137 /* 1137 /*
1138 * cmd has been completed already from timeout, just reclaim 1138 * cmd has been completed already from timeout, just reclaim
1139 * data area space and free cmd 1139 * data area space and free cmd
1140 */ 1140 */
1141 if (test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags)) 1141 if (test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags)) {
1142 WARN_ON_ONCE(se_cmd);
1142 goto out; 1143 goto out;
1144 }
1143 1145
1144 list_del_init(&cmd->queue_entry); 1146 list_del_init(&cmd->queue_entry);
1145 1147
@@ -1152,6 +1154,7 @@ static void tcmu_handle_completion(struct tcmu_cmd *cmd, struct tcmu_cmd_entry *
1152 goto done; 1154 goto done;
1153 } 1155 }
1154 1156
1157 read_len = se_cmd->data_length;
1155 if (se_cmd->data_direction == DMA_FROM_DEVICE && 1158 if (se_cmd->data_direction == DMA_FROM_DEVICE &&
1156 (entry->hdr.uflags & TCMU_UFLAG_READ_LEN) && entry->rsp.read_len) { 1159 (entry->hdr.uflags & TCMU_UFLAG_READ_LEN) && entry->rsp.read_len) {
1157 read_len_valid = true; 1160 read_len_valid = true;
@@ -1307,6 +1310,7 @@ static int tcmu_check_expired_cmd(int id, void *p, void *data)
1307 */ 1310 */
1308 scsi_status = SAM_STAT_CHECK_CONDITION; 1311 scsi_status = SAM_STAT_CHECK_CONDITION;
1309 list_del_init(&cmd->queue_entry); 1312 list_del_init(&cmd->queue_entry);
1313 cmd->se_cmd = NULL;
1310 } else { 1314 } else {
1311 list_del_init(&cmd->queue_entry); 1315 list_del_init(&cmd->queue_entry);
1312 idr_remove(&udev->commands, id); 1316 idr_remove(&udev->commands, id);
@@ -2022,6 +2026,7 @@ static void tcmu_reset_ring(struct tcmu_dev *udev, u8 err_level)
2022 2026
2023 idr_remove(&udev->commands, i); 2027 idr_remove(&udev->commands, i);
2024 if (!test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags)) { 2028 if (!test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags)) {
2029 WARN_ON(!cmd->se_cmd);
2025 list_del_init(&cmd->queue_entry); 2030 list_del_init(&cmd->queue_entry);
2026 if (err_level == 1) { 2031 if (err_level == 1) {
2027 /* 2032 /*
diff --git a/drivers/thermal/intel/int340x_thermal/processor_thermal_device.c b/drivers/thermal/intel/int340x_thermal/processor_thermal_device.c
index 213ab3cc6b80..d3446acf9bbd 100644
--- a/drivers/thermal/intel/int340x_thermal/processor_thermal_device.c
+++ b/drivers/thermal/intel/int340x_thermal/processor_thermal_device.c
@@ -487,6 +487,7 @@ static int proc_thermal_rapl_add(struct pci_dev *pdev,
487 rapl_mmio_cpu_online, rapl_mmio_cpu_down_prep); 487 rapl_mmio_cpu_online, rapl_mmio_cpu_down_prep);
488 if (ret < 0) { 488 if (ret < 0) {
489 powercap_unregister_control_type(rapl_mmio_priv.control_type); 489 powercap_unregister_control_type(rapl_mmio_priv.control_type);
490 rapl_mmio_priv.control_type = NULL;
490 return ret; 491 return ret;
491 } 492 }
492 rapl_mmio_priv.pcap_rapl_online = ret; 493 rapl_mmio_priv.pcap_rapl_online = ret;
@@ -496,6 +497,9 @@ static int proc_thermal_rapl_add(struct pci_dev *pdev,
496 497
497static void proc_thermal_rapl_remove(void) 498static void proc_thermal_rapl_remove(void)
498{ 499{
500 if (IS_ERR_OR_NULL(rapl_mmio_priv.control_type))
501 return;
502
499 cpuhp_remove_state(rapl_mmio_priv.pcap_rapl_online); 503 cpuhp_remove_state(rapl_mmio_priv.pcap_rapl_online);
500 powercap_unregister_control_type(rapl_mmio_priv.control_type); 504 powercap_unregister_control_type(rapl_mmio_priv.control_type);
501} 505}
diff --git a/drivers/tty/hvc/hvcs.c b/drivers/tty/hvc/hvcs.c
index cb4db1b3ca3c..5fb214e67d73 100644
--- a/drivers/tty/hvc/hvcs.c
+++ b/drivers/tty/hvc/hvcs.c
@@ -47,7 +47,7 @@
47 * using the 2.6 Linux kernel kref construct. 47 * using the 2.6 Linux kernel kref construct.
48 * 48 *
49 * For direction on installation and usage of this driver please reference 49 * For direction on installation and usage of this driver please reference
50 * Documentation/powerpc/hvcs.txt. 50 * Documentation/powerpc/hvcs.rst.
51 */ 51 */
52 52
53#include <linux/device.h> 53#include <linux/device.h>
diff --git a/drivers/tty/serial/Kconfig b/drivers/tty/serial/Kconfig
index fd385c8c53a5..3083dbae35f7 100644
--- a/drivers/tty/serial/Kconfig
+++ b/drivers/tty/serial/Kconfig
@@ -1035,25 +1035,6 @@ config SERIAL_VT8500_CONSOLE
1035 depends on SERIAL_VT8500=y 1035 depends on SERIAL_VT8500=y
1036 select SERIAL_CORE_CONSOLE 1036 select SERIAL_CORE_CONSOLE
1037 1037
1038config SERIAL_NETX
1039 tristate "NetX serial port support"
1040 depends on ARCH_NETX
1041 select SERIAL_CORE
1042 help
1043 If you have a machine based on a Hilscher NetX SoC you
1044 can enable its onboard serial port by enabling this option.
1045
1046 To compile this driver as a module, choose M here: the
1047 module will be called netx-serial.
1048
1049config SERIAL_NETX_CONSOLE
1050 bool "Console on NetX serial port"
1051 depends on SERIAL_NETX=y
1052 select SERIAL_CORE_CONSOLE
1053 help
1054 If you have enabled the serial port on the Hilscher NetX SoC
1055 you can make it the console by answering Y to this option.
1056
1057config SERIAL_OMAP 1038config SERIAL_OMAP
1058 tristate "OMAP serial port support" 1039 tristate "OMAP serial port support"
1059 depends on ARCH_OMAP2PLUS 1040 depends on ARCH_OMAP2PLUS
diff --git a/drivers/tty/serial/Makefile b/drivers/tty/serial/Makefile
index 7cd7cabfa6c4..15a0fccadf7e 100644
--- a/drivers/tty/serial/Makefile
+++ b/drivers/tty/serial/Makefile
@@ -59,7 +59,6 @@ obj-$(CONFIG_SERIAL_ATMEL) += atmel_serial.o
59obj-$(CONFIG_SERIAL_UARTLITE) += uartlite.o 59obj-$(CONFIG_SERIAL_UARTLITE) += uartlite.o
60obj-$(CONFIG_SERIAL_MSM) += msm_serial.o 60obj-$(CONFIG_SERIAL_MSM) += msm_serial.o
61obj-$(CONFIG_SERIAL_QCOM_GENI) += qcom_geni_serial.o 61obj-$(CONFIG_SERIAL_QCOM_GENI) += qcom_geni_serial.o
62obj-$(CONFIG_SERIAL_NETX) += netx-serial.o
63obj-$(CONFIG_SERIAL_KS8695) += serial_ks8695.o 62obj-$(CONFIG_SERIAL_KS8695) += serial_ks8695.o
64obj-$(CONFIG_SERIAL_OMAP) += omap-serial.o 63obj-$(CONFIG_SERIAL_OMAP) += omap-serial.o
65obj-$(CONFIG_SERIAL_ALTERA_UART) += altera_uart.o 64obj-$(CONFIG_SERIAL_ALTERA_UART) += altera_uart.o
diff --git a/drivers/tty/serial/kgdboc.c b/drivers/tty/serial/kgdboc.c
index bfe5e9e034ec..c7d51b51898f 100644
--- a/drivers/tty/serial/kgdboc.c
+++ b/drivers/tty/serial/kgdboc.c
@@ -277,10 +277,14 @@ static void kgdboc_pre_exp_handler(void)
277 /* Increment the module count when the debugger is active */ 277 /* Increment the module count when the debugger is active */
278 if (!kgdb_connected) 278 if (!kgdb_connected)
279 try_module_get(THIS_MODULE); 279 try_module_get(THIS_MODULE);
280
281 atomic_inc(&ignore_console_lock_warning);
280} 282}
281 283
282static void kgdboc_post_exp_handler(void) 284static void kgdboc_post_exp_handler(void)
283{ 285{
286 atomic_dec(&ignore_console_lock_warning);
287
284 /* decrement the module count when the debugger detaches */ 288 /* decrement the module count when the debugger detaches */
285 if (!kgdb_connected) 289 if (!kgdb_connected)
286 module_put(THIS_MODULE); 290 module_put(THIS_MODULE);
diff --git a/drivers/tty/serial/netx-serial.c b/drivers/tty/serial/netx-serial.c
deleted file mode 100644
index b3556863491f..000000000000
--- a/drivers/tty/serial/netx-serial.c
+++ /dev/null
@@ -1,733 +0,0 @@
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * Copyright (c) 2005 Sascha Hauer <s.hauer@pengutronix.de>, Pengutronix
4 */
5
6#if defined(CONFIG_SERIAL_NETX_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
7#define SUPPORT_SYSRQ
8#endif
9
10#include <linux/device.h>
11#include <linux/module.h>
12#include <linux/ioport.h>
13#include <linux/init.h>
14#include <linux/console.h>
15#include <linux/sysrq.h>
16#include <linux/platform_device.h>
17#include <linux/tty.h>
18#include <linux/tty_flip.h>
19#include <linux/serial_core.h>
20#include <linux/serial.h>
21
22#include <asm/io.h>
23#include <asm/irq.h>
24#include <mach/hardware.h>
25#include <mach/netx-regs.h>
26
27/* We've been assigned a range on the "Low-density serial ports" major */
28#define SERIAL_NX_MAJOR 204
29#define MINOR_START 170
30
31enum uart_regs {
32 UART_DR = 0x00,
33 UART_SR = 0x04,
34 UART_LINE_CR = 0x08,
35 UART_BAUDDIV_MSB = 0x0c,
36 UART_BAUDDIV_LSB = 0x10,
37 UART_CR = 0x14,
38 UART_FR = 0x18,
39 UART_IIR = 0x1c,
40 UART_ILPR = 0x20,
41 UART_RTS_CR = 0x24,
42 UART_RTS_LEAD = 0x28,
43 UART_RTS_TRAIL = 0x2c,
44 UART_DRV_ENABLE = 0x30,
45 UART_BRM_CR = 0x34,
46 UART_RXFIFO_IRQLEVEL = 0x38,
47 UART_TXFIFO_IRQLEVEL = 0x3c,
48};
49
50#define SR_FE (1<<0)
51#define SR_PE (1<<1)
52#define SR_BE (1<<2)
53#define SR_OE (1<<3)
54
55#define LINE_CR_BRK (1<<0)
56#define LINE_CR_PEN (1<<1)
57#define LINE_CR_EPS (1<<2)
58#define LINE_CR_STP2 (1<<3)
59#define LINE_CR_FEN (1<<4)
60#define LINE_CR_5BIT (0<<5)
61#define LINE_CR_6BIT (1<<5)
62#define LINE_CR_7BIT (2<<5)
63#define LINE_CR_8BIT (3<<5)
64#define LINE_CR_BITS_MASK (3<<5)
65
66#define CR_UART_EN (1<<0)
67#define CR_SIREN (1<<1)
68#define CR_SIRLP (1<<2)
69#define CR_MSIE (1<<3)
70#define CR_RIE (1<<4)
71#define CR_TIE (1<<5)
72#define CR_RTIE (1<<6)
73#define CR_LBE (1<<7)
74
75#define FR_CTS (1<<0)
76#define FR_DSR (1<<1)
77#define FR_DCD (1<<2)
78#define FR_BUSY (1<<3)
79#define FR_RXFE (1<<4)
80#define FR_TXFF (1<<5)
81#define FR_RXFF (1<<6)
82#define FR_TXFE (1<<7)
83
84#define IIR_MIS (1<<0)
85#define IIR_RIS (1<<1)
86#define IIR_TIS (1<<2)
87#define IIR_RTIS (1<<3)
88#define IIR_MASK 0xf
89
90#define RTS_CR_AUTO (1<<0)
91#define RTS_CR_RTS (1<<1)
92#define RTS_CR_COUNT (1<<2)
93#define RTS_CR_MOD2 (1<<3)
94#define RTS_CR_RTS_POL (1<<4)
95#define RTS_CR_CTS_CTR (1<<5)
96#define RTS_CR_CTS_POL (1<<6)
97#define RTS_CR_STICK (1<<7)
98
99#define UART_PORT_SIZE 0x40
100#define DRIVER_NAME "netx-uart"
101
102struct netx_port {
103 struct uart_port port;
104};
105
106static void netx_stop_tx(struct uart_port *port)
107{
108 unsigned int val;
109 val = readl(port->membase + UART_CR);
110 writel(val & ~CR_TIE, port->membase + UART_CR);
111}
112
113static void netx_stop_rx(struct uart_port *port)
114{
115 unsigned int val;
116 val = readl(port->membase + UART_CR);
117 writel(val & ~CR_RIE, port->membase + UART_CR);
118}
119
120static void netx_enable_ms(struct uart_port *port)
121{
122 unsigned int val;
123 val = readl(port->membase + UART_CR);
124 writel(val | CR_MSIE, port->membase + UART_CR);
125}
126
127static inline void netx_transmit_buffer(struct uart_port *port)
128{
129 struct circ_buf *xmit = &port->state->xmit;
130
131 if (port->x_char) {
132 writel(port->x_char, port->membase + UART_DR);
133 port->icount.tx++;
134 port->x_char = 0;
135 return;
136 }
137
138 if (uart_tx_stopped(port) || uart_circ_empty(xmit)) {
139 netx_stop_tx(port);
140 return;
141 }
142
143 do {
144 /* send xmit->buf[xmit->tail]
145 * out the port here */
146 writel(xmit->buf[xmit->tail], port->membase + UART_DR);
147 xmit->tail = (xmit->tail + 1) &
148 (UART_XMIT_SIZE - 1);
149 port->icount.tx++;
150 if (uart_circ_empty(xmit))
151 break;
152 } while (!(readl(port->membase + UART_FR) & FR_TXFF));
153
154 if (uart_circ_empty(xmit))
155 netx_stop_tx(port);
156}
157
158static void netx_start_tx(struct uart_port *port)
159{
160 writel(
161 readl(port->membase + UART_CR) | CR_TIE, port->membase + UART_CR);
162
163 if (!(readl(port->membase + UART_FR) & FR_TXFF))
164 netx_transmit_buffer(port);
165}
166
167static unsigned int netx_tx_empty(struct uart_port *port)
168{
169 return readl(port->membase + UART_FR) & FR_BUSY ? 0 : TIOCSER_TEMT;
170}
171
172static void netx_txint(struct uart_port *port)
173{
174 struct circ_buf *xmit = &port->state->xmit;
175
176 if (uart_circ_empty(xmit) || uart_tx_stopped(port)) {
177 netx_stop_tx(port);
178 return;
179 }
180
181 netx_transmit_buffer(port);
182
183 if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
184 uart_write_wakeup(port);
185}
186
187static void netx_rxint(struct uart_port *port, unsigned long *flags)
188{
189 unsigned char rx, flg, status;
190
191 while (!(readl(port->membase + UART_FR) & FR_RXFE)) {
192 rx = readl(port->membase + UART_DR);
193 flg = TTY_NORMAL;
194 port->icount.rx++;
195 status = readl(port->membase + UART_SR);
196 if (status & SR_BE) {
197 writel(0, port->membase + UART_SR);
198 if (uart_handle_break(port))
199 continue;
200 }
201
202 if (unlikely(status & (SR_FE | SR_PE | SR_OE))) {
203
204 if (status & SR_PE)
205 port->icount.parity++;
206 else if (status & SR_FE)
207 port->icount.frame++;
208 if (status & SR_OE)
209 port->icount.overrun++;
210
211 status &= port->read_status_mask;
212
213 if (status & SR_BE)
214 flg = TTY_BREAK;
215 else if (status & SR_PE)
216 flg = TTY_PARITY;
217 else if (status & SR_FE)
218 flg = TTY_FRAME;
219 }
220
221 if (uart_handle_sysrq_char(port, rx))
222 continue;
223
224 uart_insert_char(port, status, SR_OE, rx, flg);
225 }
226
227 spin_unlock_irqrestore(&port->lock, *flags);
228 tty_flip_buffer_push(&port->state->port);
229 spin_lock_irqsave(&port->lock, *flags);
230}
231
232static irqreturn_t netx_int(int irq, void *dev_id)
233{
234 struct uart_port *port = dev_id;
235 unsigned long flags;
236 unsigned char status;
237
238 spin_lock_irqsave(&port->lock,flags);
239
240 status = readl(port->membase + UART_IIR) & IIR_MASK;
241 while (status) {
242 if (status & IIR_RIS)
243 netx_rxint(port, &flags);
244 if (status & IIR_TIS)
245 netx_txint(port);
246 if (status & IIR_MIS) {
247 if (readl(port->membase + UART_FR) & FR_CTS)
248 uart_handle_cts_change(port, 1);
249 else
250 uart_handle_cts_change(port, 0);
251 }
252 writel(0, port->membase + UART_IIR);
253 status = readl(port->membase + UART_IIR) & IIR_MASK;
254 }
255
256 spin_unlock_irqrestore(&port->lock,flags);
257 return IRQ_HANDLED;
258}
259
260static unsigned int netx_get_mctrl(struct uart_port *port)
261{
262 unsigned int ret = TIOCM_DSR | TIOCM_CAR;
263
264 if (readl(port->membase + UART_FR) & FR_CTS)
265 ret |= TIOCM_CTS;
266
267 return ret;
268}
269
270static void netx_set_mctrl(struct uart_port *port, unsigned int mctrl)
271{
272 unsigned int val;
273
274 /* FIXME: Locking needed ? */
275 if (mctrl & TIOCM_RTS) {
276 val = readl(port->membase + UART_RTS_CR);
277 writel(val | RTS_CR_RTS, port->membase + UART_RTS_CR);
278 }
279}
280
281static void netx_break_ctl(struct uart_port *port, int break_state)
282{
283 unsigned int line_cr;
284 spin_lock_irq(&port->lock);
285
286 line_cr = readl(port->membase + UART_LINE_CR);
287 if (break_state != 0)
288 line_cr |= LINE_CR_BRK;
289 else
290 line_cr &= ~LINE_CR_BRK;
291 writel(line_cr, port->membase + UART_LINE_CR);
292
293 spin_unlock_irq(&port->lock);
294}
295
296static int netx_startup(struct uart_port *port)
297{
298 int ret;
299
300 ret = request_irq(port->irq, netx_int, 0,
301 DRIVER_NAME, port);
302 if (ret) {
303 dev_err(port->dev, "unable to grab irq%d\n",port->irq);
304 goto exit;
305 }
306
307 writel(readl(port->membase + UART_LINE_CR) | LINE_CR_FEN,
308 port->membase + UART_LINE_CR);
309
310 writel(CR_MSIE | CR_RIE | CR_TIE | CR_RTIE | CR_UART_EN,
311 port->membase + UART_CR);
312
313exit:
314 return ret;
315}
316
317static void netx_shutdown(struct uart_port *port)
318{
319 writel(0, port->membase + UART_CR) ;
320
321 free_irq(port->irq, port);
322}
323
324static void
325netx_set_termios(struct uart_port *port, struct ktermios *termios,
326 struct ktermios *old)
327{
328 unsigned int baud, quot;
329 unsigned char old_cr;
330 unsigned char line_cr = LINE_CR_FEN;
331 unsigned char rts_cr = 0;
332
333 switch (termios->c_cflag & CSIZE) {
334 case CS5:
335 line_cr |= LINE_CR_5BIT;
336 break;
337 case CS6:
338 line_cr |= LINE_CR_6BIT;
339 break;
340 case CS7:
341 line_cr |= LINE_CR_7BIT;
342 break;
343 case CS8:
344 line_cr |= LINE_CR_8BIT;
345 break;
346 }
347
348 if (termios->c_cflag & CSTOPB)
349 line_cr |= LINE_CR_STP2;
350
351 if (termios->c_cflag & PARENB) {
352 line_cr |= LINE_CR_PEN;
353 if (!(termios->c_cflag & PARODD))
354 line_cr |= LINE_CR_EPS;
355 }
356
357 if (termios->c_cflag & CRTSCTS)
358 rts_cr = RTS_CR_AUTO | RTS_CR_CTS_CTR | RTS_CR_RTS_POL;
359
360 baud = uart_get_baud_rate(port, termios, old, 0, port->uartclk/16);
361 quot = baud * 4096;
362 quot /= 1000;
363 quot *= 256;
364 quot /= 100000;
365
366 spin_lock_irq(&port->lock);
367
368 uart_update_timeout(port, termios->c_cflag, baud);
369
370 old_cr = readl(port->membase + UART_CR);
371
372 /* disable interrupts */
373 writel(old_cr & ~(CR_MSIE | CR_RIE | CR_TIE | CR_RTIE),
374 port->membase + UART_CR);
375
376 /* drain transmitter */
377 while (readl(port->membase + UART_FR) & FR_BUSY);
378
379 /* disable UART */
380 writel(old_cr & ~CR_UART_EN, port->membase + UART_CR);
381
382 /* modem status interrupts */
383 old_cr &= ~CR_MSIE;
384 if (UART_ENABLE_MS(port, termios->c_cflag))
385 old_cr |= CR_MSIE;
386
387 writel((quot>>8) & 0xff, port->membase + UART_BAUDDIV_MSB);
388 writel(quot & 0xff, port->membase + UART_BAUDDIV_LSB);
389 writel(line_cr, port->membase + UART_LINE_CR);
390
391 writel(rts_cr, port->membase + UART_RTS_CR);
392
393 /*
394 * Characters to ignore
395 */
396 port->ignore_status_mask = 0;
397 if (termios->c_iflag & IGNPAR)
398 port->ignore_status_mask |= SR_PE;
399 if (termios->c_iflag & IGNBRK) {
400 port->ignore_status_mask |= SR_BE;
401 /*
402 * If we're ignoring parity and break indicators,
403 * ignore overruns too (for real raw support).
404 */
405 if (termios->c_iflag & IGNPAR)
406 port->ignore_status_mask |= SR_PE;
407 }
408
409 port->read_status_mask = 0;
410 if (termios->c_iflag & (IGNBRK | BRKINT | PARMRK))
411 port->read_status_mask |= SR_BE;
412 if (termios->c_iflag & INPCK)
413 port->read_status_mask |= SR_PE | SR_FE;
414
415 writel(old_cr, port->membase + UART_CR);
416
417 spin_unlock_irq(&port->lock);
418}
419
420static const char *netx_type(struct uart_port *port)
421{
422 return port->type == PORT_NETX ? "NETX" : NULL;
423}
424
/* Release the MMIO register window claimed by netx_request_port(). */
static void netx_release_port(struct uart_port *port)
{
	release_mem_region(port->mapbase, UART_PORT_SIZE);
}
429
430static int netx_request_port(struct uart_port *port)
431{
432 return request_mem_region(port->mapbase, UART_PORT_SIZE,
433 DRIVER_NAME) != NULL ? 0 : -EBUSY;
434}
435
436static void netx_config_port(struct uart_port *port, int flags)
437{
438 if (flags & UART_CONFIG_TYPE && netx_request_port(port) == 0)
439 port->type = PORT_NETX;
440}
441
442static int
443netx_verify_port(struct uart_port *port, struct serial_struct *ser)
444{
445 int ret = 0;
446
447 if (ser->type != PORT_UNKNOWN && ser->type != PORT_NETX)
448 ret = -EINVAL;
449
450 return ret;
451}
452
/* uart_ops vector shared by every entry in netx_ports[]. */
static struct uart_ops netx_pops = {
	.tx_empty = netx_tx_empty,
	.set_mctrl = netx_set_mctrl,
	.get_mctrl = netx_get_mctrl,
	.stop_tx = netx_stop_tx,
	.start_tx = netx_start_tx,
	.stop_rx = netx_stop_rx,
	.enable_ms = netx_enable_ms,
	.break_ctl = netx_break_ctl,
	.startup = netx_startup,
	.shutdown = netx_shutdown,
	.set_termios = netx_set_termios,
	.type = netx_type,
	.release_port = netx_release_port,
	.request_port = netx_request_port,
	.config_port = netx_config_port,
	.verify_port = netx_verify_port,
};
471
/*
 * Static description of the three on-chip UARTs.  Register windows are
 * statically mapped (io_p2v), so membase is valid before probe; all
 * ports run from the same 100 MHz clock and have a 16-byte FIFO.
 */
static struct netx_port netx_ports[] = {
	{
		.port = {
			.type = PORT_NETX,
			.iotype = UPIO_MEM,
			.membase = (char __iomem *)io_p2v(NETX_PA_UART0),
			.mapbase = NETX_PA_UART0,
			.irq = NETX_IRQ_UART0,
			.uartclk = 100000000,
			.fifosize = 16,
			.flags = UPF_BOOT_AUTOCONF,
			.ops = &netx_pops,
			.line = 0,
		},
	}, {
		.port = {
			.type = PORT_NETX,
			.iotype = UPIO_MEM,
			.membase = (char __iomem *)io_p2v(NETX_PA_UART1),
			.mapbase = NETX_PA_UART1,
			.irq = NETX_IRQ_UART1,
			.uartclk = 100000000,
			.fifosize = 16,
			.flags = UPF_BOOT_AUTOCONF,
			.ops = &netx_pops,
			.line = 1,
		},
	}, {
		.port = {
			.type = PORT_NETX,
			.iotype = UPIO_MEM,
			.membase = (char __iomem *)io_p2v(NETX_PA_UART2),
			.mapbase = NETX_PA_UART2,
			.irq = NETX_IRQ_UART2,
			.uartclk = 100000000,
			.fifosize = 16,
			.flags = UPF_BOOT_AUTOCONF,
			.ops = &netx_pops,
			.line = 2,
		},
	}
};
514
515#ifdef CONFIG_SERIAL_NETX_CONSOLE
516
517static void netx_console_putchar(struct uart_port *port, int ch)
518{
519 while (readl(port->membase + UART_FR) & FR_BUSY);
520 writel(ch, port->membase + UART_DR);
521}
522
523static void
524netx_console_write(struct console *co, const char *s, unsigned int count)
525{
526 struct uart_port *port = &netx_ports[co->index].port;
527 unsigned char cr_save;
528
529 cr_save = readl(port->membase + UART_CR);
530 writel(cr_save | CR_UART_EN, port->membase + UART_CR);
531
532 uart_console_write(port, s, count, netx_console_putchar);
533
534 while (readl(port->membase + UART_FR) & FR_BUSY);
535 writel(cr_save, port->membase + UART_CR);
536}
537
/*
 * Read back baud rate, parity, word length and flow control from the
 * hardware, so the console can attach to a UART the boot loader has
 * already configured.  *flow is only written when auto-RTS is on.
 */
static void __init
netx_console_get_options(struct uart_port *port, int *baud,
			int *parity, int *bits, int *flow)
{
	unsigned char line_cr;

	/*
	 * Invert the divisor computation done in netx_set_termios()
	 * (quot = baud*4096/1000*256/100000).  The multiply/divide steps
	 * are kept separate so integer truncation matches the encoder.
	 */
	*baud = (readl(port->membase + UART_BAUDDIV_MSB) << 8) |
		readl(port->membase + UART_BAUDDIV_LSB);
	*baud *= 1000;
	*baud /= 4096;
	*baud *= 1000;
	*baud /= 256;
	*baud *= 100;

	line_cr = readl(port->membase + UART_LINE_CR);
	*parity = 'n';
	if (line_cr & LINE_CR_PEN) {
		/* even-parity-select bit distinguishes 'e' from 'o' */
		if (line_cr & LINE_CR_EPS)
			*parity = 'e';
		else
			*parity = 'o';
	}

	switch (line_cr & LINE_CR_BITS_MASK) {
	case LINE_CR_8BIT:
		*bits = 8;
		break;
	case LINE_CR_7BIT:
		*bits = 7;
		break;
	case LINE_CR_6BIT:
		*bits = 6;
		break;
	case LINE_CR_5BIT:
		*bits = 5;
		break;
	}

	if (readl(port->membase + UART_RTS_CR) & RTS_CR_AUTO)
		*flow = 'r';
}
579
580static int __init
581netx_console_setup(struct console *co, char *options)
582{
583 struct netx_port *sport;
584 int baud = 9600;
585 int bits = 8;
586 int parity = 'n';
587 int flow = 'n';
588
589 /*
590 * Check whether an invalid uart number has been specified, and
591 * if so, search for the first available port that does have
592 * console support.
593 */
594 if (co->index == -1 || co->index >= ARRAY_SIZE(netx_ports))
595 co->index = 0;
596 sport = &netx_ports[co->index];
597
598 if (options) {
599 uart_parse_options(options, &baud, &parity, &bits, &flow);
600 } else {
601 /* if the UART is enabled, assume it has been correctly setup
602 * by the bootloader and get the options
603 */
604 if (readl(sport->port.membase + UART_CR) & CR_UART_EN) {
605 netx_console_get_options(&sport->port, &baud,
606 &parity, &bits, &flow);
607 }
608
609 }
610
611 return uart_set_options(&sport->port, co, baud, parity, bits, flow);
612}
613
/* Forward declaration: netx_console.data must point at the driver. */
static struct uart_driver netx_reg;

/*
 * Console bound to the ttyNX devices.  index = -1 lets the core pick
 * the port from the command line; CON_PRINTBUFFER replays the log
 * buffer when the console registers.
 */
static struct console netx_console = {
	.name = "ttyNX",
	.write = netx_console_write,
	.device = uart_console_device,
	.setup = netx_console_setup,
	.flags = CON_PRINTBUFFER,
	.index = -1,
	.data = &netx_reg,
};
624
/* Register the console early, before the tty layer is available. */
static int __init netx_console_init(void)
{
	register_console(&netx_console);
	return 0;
}
console_initcall(netx_console_init);
631
632#define NETX_CONSOLE &netx_console
633#else
634#define NETX_CONSOLE NULL
635#endif
636
/*
 * The uart_driver shared by all three ports; cons is NULL when
 * CONFIG_SERIAL_NETX_CONSOLE is disabled (see NETX_CONSOLE above).
 */
static struct uart_driver netx_reg = {
	.owner = THIS_MODULE,
	.driver_name = DRIVER_NAME,
	.dev_name = "ttyNX",
	.major = SERIAL_NX_MAJOR,
	.minor = MINOR_START,
	.nr = ARRAY_SIZE(netx_ports),
	.cons = NETX_CONSOLE,
};
646
647static int serial_netx_suspend(struct platform_device *pdev, pm_message_t state)
648{
649 struct netx_port *sport = platform_get_drvdata(pdev);
650
651 if (sport)
652 uart_suspend_port(&netx_reg, &sport->port);
653
654 return 0;
655}
656
657static int serial_netx_resume(struct platform_device *pdev)
658{
659 struct netx_port *sport = platform_get_drvdata(pdev);
660
661 if (sport)
662 uart_resume_port(&netx_reg, &sport->port);
663
664 return 0;
665}
666
667static int serial_netx_probe(struct platform_device *pdev)
668{
669 struct uart_port *port = &netx_ports[pdev->id].port;
670
671 dev_info(&pdev->dev, "initialising\n");
672
673 port->dev = &pdev->dev;
674
675 writel(1, port->membase + UART_RXFIFO_IRQLEVEL);
676 uart_add_one_port(&netx_reg, &netx_ports[pdev->id].port);
677 platform_set_drvdata(pdev, &netx_ports[pdev->id]);
678
679 return 0;
680}
681
682static int serial_netx_remove(struct platform_device *pdev)
683{
684 struct netx_port *sport = platform_get_drvdata(pdev);
685
686 if (sport)
687 uart_remove_one_port(&netx_reg, &sport->port);
688
689 return 0;
690}
691
/* Platform glue; matches devices named DRIVER_NAME by id. */
static struct platform_driver serial_netx_driver = {
	.probe = serial_netx_probe,
	.remove = serial_netx_remove,

	/* legacy (non-dev_pm_ops) suspend/resume callbacks */
	.suspend = serial_netx_suspend,
	.resume = serial_netx_resume,

	.driver = {
		.name = DRIVER_NAME,
	},
};
703
704static int __init netx_serial_init(void)
705{
706 int ret;
707
708 printk(KERN_INFO "Serial: NetX driver\n");
709
710 ret = uart_register_driver(&netx_reg);
711 if (ret)
712 return ret;
713
714 ret = platform_driver_register(&serial_netx_driver);
715 if (ret != 0)
716 uart_unregister_driver(&netx_reg);
717
718 return 0;
719}
720
/* Module exit: tear down in reverse order of netx_serial_init(). */
static void __exit netx_serial_exit(void)
{
	platform_driver_unregister(&serial_netx_driver);
	uart_unregister_driver(&netx_reg);
}
726
module_init(netx_serial_init);
module_exit(netx_serial_exit);

/* Module metadata; the alias allows auto-loading by platform device name. */
MODULE_AUTHOR("Sascha Hauer");
MODULE_DESCRIPTION("NetX serial port driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:" DRIVER_NAME);
diff --git a/drivers/tty/tty_ldsem.c b/drivers/tty/tty_ldsem.c
index 717292c1c0df..60ff236a3d63 100644
--- a/drivers/tty/tty_ldsem.c
+++ b/drivers/tty/tty_ldsem.c
@@ -93,8 +93,7 @@ static void __ldsem_wake_readers(struct ld_semaphore *sem)
93 93
94 list_for_each_entry_safe(waiter, next, &sem->read_wait, list) { 94 list_for_each_entry_safe(waiter, next, &sem->read_wait, list) {
95 tsk = waiter->task; 95 tsk = waiter->task;
96 smp_mb(); 96 smp_store_release(&waiter->task, NULL);
97 waiter->task = NULL;
98 wake_up_process(tsk); 97 wake_up_process(tsk);
99 put_task_struct(tsk); 98 put_task_struct(tsk);
100 } 99 }
@@ -194,7 +193,7 @@ down_read_failed(struct ld_semaphore *sem, long count, long timeout)
194 for (;;) { 193 for (;;) {
195 set_current_state(TASK_UNINTERRUPTIBLE); 194 set_current_state(TASK_UNINTERRUPTIBLE);
196 195
197 if (!waiter.task) 196 if (!smp_load_acquire(&waiter.task))
198 break; 197 break;
199 if (!timeout) 198 if (!timeout)
200 break; 199 break;
diff --git a/drivers/tty/vt/vt.c b/drivers/tty/vt/vt.c
index ec92f36ab5c4..34aa39d1aed9 100644
--- a/drivers/tty/vt/vt.c
+++ b/drivers/tty/vt/vt.c
@@ -3771,7 +3771,11 @@ static ssize_t show_bind(struct device *dev, struct device_attribute *attr,
3771 char *buf) 3771 char *buf)
3772{ 3772{
3773 struct con_driver *con = dev_get_drvdata(dev); 3773 struct con_driver *con = dev_get_drvdata(dev);
3774 int bind = con_is_bound(con->con); 3774 int bind;
3775
3776 console_lock();
3777 bind = con_is_bound(con->con);
3778 console_unlock();
3775 3779
3776 return snprintf(buf, PAGE_SIZE, "%i\n", bind); 3780 return snprintf(buf, PAGE_SIZE, "%i\n", bind);
3777} 3781}
diff --git a/drivers/usb/chipidea/ci_hdrc_imx.c b/drivers/usb/chipidea/ci_hdrc_imx.c
index b5abfe89190c..df8812c30640 100644
--- a/drivers/usb/chipidea/ci_hdrc_imx.c
+++ b/drivers/usb/chipidea/ci_hdrc_imx.c
@@ -454,9 +454,11 @@ err_clk:
454 imx_disable_unprepare_clks(dev); 454 imx_disable_unprepare_clks(dev);
455disable_hsic_regulator: 455disable_hsic_regulator:
456 if (data->hsic_pad_regulator) 456 if (data->hsic_pad_regulator)
457 ret = regulator_disable(data->hsic_pad_regulator); 457 /* don't overwrite original ret (cf. EPROBE_DEFER) */
458 regulator_disable(data->hsic_pad_regulator);
458 if (pdata.flags & CI_HDRC_PMQOS) 459 if (pdata.flags & CI_HDRC_PMQOS)
459 pm_qos_remove_request(&data->pm_qos_req); 460 pm_qos_remove_request(&data->pm_qos_req);
461 data->ci_pdev = NULL;
460 return ret; 462 return ret;
461} 463}
462 464
@@ -469,14 +471,17 @@ static int ci_hdrc_imx_remove(struct platform_device *pdev)
469 pm_runtime_disable(&pdev->dev); 471 pm_runtime_disable(&pdev->dev);
470 pm_runtime_put_noidle(&pdev->dev); 472 pm_runtime_put_noidle(&pdev->dev);
471 } 473 }
472 ci_hdrc_remove_device(data->ci_pdev); 474 if (data->ci_pdev)
475 ci_hdrc_remove_device(data->ci_pdev);
473 if (data->override_phy_control) 476 if (data->override_phy_control)
474 usb_phy_shutdown(data->phy); 477 usb_phy_shutdown(data->phy);
475 imx_disable_unprepare_clks(&pdev->dev); 478 if (data->ci_pdev) {
476 if (data->plat_data->flags & CI_HDRC_PMQOS) 479 imx_disable_unprepare_clks(&pdev->dev);
477 pm_qos_remove_request(&data->pm_qos_req); 480 if (data->plat_data->flags & CI_HDRC_PMQOS)
478 if (data->hsic_pad_regulator) 481 pm_qos_remove_request(&data->pm_qos_req);
479 regulator_disable(data->hsic_pad_regulator); 482 if (data->hsic_pad_regulator)
483 regulator_disable(data->hsic_pad_regulator);
484 }
480 485
481 return 0; 486 return 0;
482} 487}
diff --git a/drivers/usb/chipidea/udc.c b/drivers/usb/chipidea/udc.c
index 6a5ee8e6da10..67ad40b0a05b 100644
--- a/drivers/usb/chipidea/udc.c
+++ b/drivers/usb/chipidea/udc.c
@@ -709,12 +709,6 @@ static int _gadget_stop_activity(struct usb_gadget *gadget)
709 struct ci_hdrc *ci = container_of(gadget, struct ci_hdrc, gadget); 709 struct ci_hdrc *ci = container_of(gadget, struct ci_hdrc, gadget);
710 unsigned long flags; 710 unsigned long flags;
711 711
712 spin_lock_irqsave(&ci->lock, flags);
713 ci->gadget.speed = USB_SPEED_UNKNOWN;
714 ci->remote_wakeup = 0;
715 ci->suspended = 0;
716 spin_unlock_irqrestore(&ci->lock, flags);
717
718 /* flush all endpoints */ 712 /* flush all endpoints */
719 gadget_for_each_ep(ep, gadget) { 713 gadget_for_each_ep(ep, gadget) {
720 usb_ep_fifo_flush(ep); 714 usb_ep_fifo_flush(ep);
@@ -732,6 +726,12 @@ static int _gadget_stop_activity(struct usb_gadget *gadget)
732 ci->status = NULL; 726 ci->status = NULL;
733 } 727 }
734 728
729 spin_lock_irqsave(&ci->lock, flags);
730 ci->gadget.speed = USB_SPEED_UNKNOWN;
731 ci->remote_wakeup = 0;
732 ci->suspended = 0;
733 spin_unlock_irqrestore(&ci->lock, flags);
734
735 return 0; 735 return 0;
736} 736}
737 737
@@ -1303,6 +1303,10 @@ static int ep_disable(struct usb_ep *ep)
1303 return -EBUSY; 1303 return -EBUSY;
1304 1304
1305 spin_lock_irqsave(hwep->lock, flags); 1305 spin_lock_irqsave(hwep->lock, flags);
1306 if (hwep->ci->gadget.speed == USB_SPEED_UNKNOWN) {
1307 spin_unlock_irqrestore(hwep->lock, flags);
1308 return 0;
1309 }
1306 1310
1307 /* only internal SW should disable ctrl endpts */ 1311 /* only internal SW should disable ctrl endpts */
1308 1312
@@ -1392,6 +1396,10 @@ static int ep_queue(struct usb_ep *ep, struct usb_request *req,
1392 return -EINVAL; 1396 return -EINVAL;
1393 1397
1394 spin_lock_irqsave(hwep->lock, flags); 1398 spin_lock_irqsave(hwep->lock, flags);
1399 if (hwep->ci->gadget.speed == USB_SPEED_UNKNOWN) {
1400 spin_unlock_irqrestore(hwep->lock, flags);
1401 return 0;
1402 }
1395 retval = _ep_queue(ep, req, gfp_flags); 1403 retval = _ep_queue(ep, req, gfp_flags);
1396 spin_unlock_irqrestore(hwep->lock, flags); 1404 spin_unlock_irqrestore(hwep->lock, flags);
1397 return retval; 1405 return retval;
@@ -1415,8 +1423,8 @@ static int ep_dequeue(struct usb_ep *ep, struct usb_request *req)
1415 return -EINVAL; 1423 return -EINVAL;
1416 1424
1417 spin_lock_irqsave(hwep->lock, flags); 1425 spin_lock_irqsave(hwep->lock, flags);
1418 1426 if (hwep->ci->gadget.speed != USB_SPEED_UNKNOWN)
1419 hw_ep_flush(hwep->ci, hwep->num, hwep->dir); 1427 hw_ep_flush(hwep->ci, hwep->num, hwep->dir);
1420 1428
1421 list_for_each_entry_safe(node, tmpnode, &hwreq->tds, td) { 1429 list_for_each_entry_safe(node, tmpnode, &hwreq->tds, td) {
1422 dma_pool_free(hwep->td_pool, node->ptr, node->dma); 1430 dma_pool_free(hwep->td_pool, node->ptr, node->dma);
@@ -1487,6 +1495,10 @@ static void ep_fifo_flush(struct usb_ep *ep)
1487 } 1495 }
1488 1496
1489 spin_lock_irqsave(hwep->lock, flags); 1497 spin_lock_irqsave(hwep->lock, flags);
1498 if (hwep->ci->gadget.speed == USB_SPEED_UNKNOWN) {
1499 spin_unlock_irqrestore(hwep->lock, flags);
1500 return;
1501 }
1490 1502
1491 hw_ep_flush(hwep->ci, hwep->num, hwep->dir); 1503 hw_ep_flush(hwep->ci, hwep->num, hwep->dir);
1492 1504
@@ -1559,6 +1571,10 @@ static int ci_udc_wakeup(struct usb_gadget *_gadget)
1559 int ret = 0; 1571 int ret = 0;
1560 1572
1561 spin_lock_irqsave(&ci->lock, flags); 1573 spin_lock_irqsave(&ci->lock, flags);
1574 if (ci->gadget.speed == USB_SPEED_UNKNOWN) {
1575 spin_unlock_irqrestore(&ci->lock, flags);
1576 return 0;
1577 }
1562 if (!ci->remote_wakeup) { 1578 if (!ci->remote_wakeup) {
1563 ret = -EOPNOTSUPP; 1579 ret = -EOPNOTSUPP;
1564 goto out; 1580 goto out;
diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
index 183b41753c98..62f4fb9b362f 100644
--- a/drivers/usb/class/cdc-acm.c
+++ b/drivers/usb/class/cdc-acm.c
@@ -1301,10 +1301,6 @@ made_compressed_probe:
1301 tty_port_init(&acm->port); 1301 tty_port_init(&acm->port);
1302 acm->port.ops = &acm_port_ops; 1302 acm->port.ops = &acm_port_ops;
1303 1303
1304 minor = acm_alloc_minor(acm);
1305 if (minor < 0)
1306 goto alloc_fail1;
1307
1308 ctrlsize = usb_endpoint_maxp(epctrl); 1304 ctrlsize = usb_endpoint_maxp(epctrl);
1309 readsize = usb_endpoint_maxp(epread) * 1305 readsize = usb_endpoint_maxp(epread) *
1310 (quirks == SINGLE_RX_URB ? 1 : 2); 1306 (quirks == SINGLE_RX_URB ? 1 : 2);
@@ -1312,6 +1308,13 @@ made_compressed_probe:
1312 acm->writesize = usb_endpoint_maxp(epwrite) * 20; 1308 acm->writesize = usb_endpoint_maxp(epwrite) * 20;
1313 acm->control = control_interface; 1309 acm->control = control_interface;
1314 acm->data = data_interface; 1310 acm->data = data_interface;
1311
1312 usb_get_intf(acm->control); /* undone in destruct() */
1313
1314 minor = acm_alloc_minor(acm);
1315 if (minor < 0)
1316 goto alloc_fail1;
1317
1315 acm->minor = minor; 1318 acm->minor = minor;
1316 acm->dev = usb_dev; 1319 acm->dev = usb_dev;
1317 if (h.usb_cdc_acm_descriptor) 1320 if (h.usb_cdc_acm_descriptor)
@@ -1458,7 +1461,6 @@ skip_countries:
1458 usb_driver_claim_interface(&acm_driver, data_interface, acm); 1461 usb_driver_claim_interface(&acm_driver, data_interface, acm);
1459 usb_set_intfdata(data_interface, acm); 1462 usb_set_intfdata(data_interface, acm);
1460 1463
1461 usb_get_intf(control_interface);
1462 tty_dev = tty_port_register_device(&acm->port, acm_tty_driver, minor, 1464 tty_dev = tty_port_register_device(&acm->port, acm_tty_driver, minor,
1463 &control_interface->dev); 1465 &control_interface->dev);
1464 if (IS_ERR(tty_dev)) { 1466 if (IS_ERR(tty_dev)) {
diff --git a/drivers/usb/class/cdc-wdm.c b/drivers/usb/class/cdc-wdm.c
index a7824a51f86d..70afb2ca1eab 100644
--- a/drivers/usb/class/cdc-wdm.c
+++ b/drivers/usb/class/cdc-wdm.c
@@ -587,10 +587,20 @@ static int wdm_flush(struct file *file, fl_owner_t id)
587{ 587{
588 struct wdm_device *desc = file->private_data; 588 struct wdm_device *desc = file->private_data;
589 589
590 wait_event(desc->wait, !test_bit(WDM_IN_USE, &desc->flags)); 590 wait_event(desc->wait,
591 /*
592 * needs both flags. We cannot do with one
593 * because resetting it would cause a race
594 * with write() yet we need to signal
595 * a disconnect
596 */
597 !test_bit(WDM_IN_USE, &desc->flags) ||
598 test_bit(WDM_DISCONNECTING, &desc->flags));
591 599
592 /* cannot dereference desc->intf if WDM_DISCONNECTING */ 600 /* cannot dereference desc->intf if WDM_DISCONNECTING */
593 if (desc->werr < 0 && !test_bit(WDM_DISCONNECTING, &desc->flags)) 601 if (test_bit(WDM_DISCONNECTING, &desc->flags))
602 return -ENODEV;
603 if (desc->werr < 0)
594 dev_err(&desc->intf->dev, "Error in flush path: %d\n", 604 dev_err(&desc->intf->dev, "Error in flush path: %d\n",
595 desc->werr); 605 desc->werr);
596 606
@@ -974,8 +984,6 @@ static void wdm_disconnect(struct usb_interface *intf)
974 spin_lock_irqsave(&desc->iuspin, flags); 984 spin_lock_irqsave(&desc->iuspin, flags);
975 set_bit(WDM_DISCONNECTING, &desc->flags); 985 set_bit(WDM_DISCONNECTING, &desc->flags);
976 set_bit(WDM_READ, &desc->flags); 986 set_bit(WDM_READ, &desc->flags);
977 /* to terminate pending flushes */
978 clear_bit(WDM_IN_USE, &desc->flags);
979 spin_unlock_irqrestore(&desc->iuspin, flags); 987 spin_unlock_irqrestore(&desc->iuspin, flags);
980 wake_up_all(&desc->wait); 988 wake_up_all(&desc->wait);
981 mutex_lock(&desc->rlock); 989 mutex_lock(&desc->rlock);
diff --git a/drivers/usb/class/usbtmc.c b/drivers/usb/class/usbtmc.c
index 4942122b2346..36858ddd8d9b 100644
--- a/drivers/usb/class/usbtmc.c
+++ b/drivers/usb/class/usbtmc.c
@@ -2362,8 +2362,11 @@ static int usbtmc_probe(struct usb_interface *intf,
2362 goto err_put; 2362 goto err_put;
2363 } 2363 }
2364 2364
2365 retcode = -EINVAL;
2365 data->bulk_in = bulk_in->bEndpointAddress; 2366 data->bulk_in = bulk_in->bEndpointAddress;
2366 data->wMaxPacketSize = usb_endpoint_maxp(bulk_in); 2367 data->wMaxPacketSize = usb_endpoint_maxp(bulk_in);
2368 if (!data->wMaxPacketSize)
2369 goto err_put;
2367 dev_dbg(&intf->dev, "Found bulk in endpoint at %u\n", data->bulk_in); 2370 dev_dbg(&intf->dev, "Found bulk in endpoint at %u\n", data->bulk_in);
2368 2371
2369 data->bulk_out = bulk_out->bEndpointAddress; 2372 data->bulk_out = bulk_out->bEndpointAddress;
diff --git a/drivers/usb/core/buffer.c b/drivers/usb/core/buffer.c
index 1359b78a624e..6cf22c27f2d2 100644
--- a/drivers/usb/core/buffer.c
+++ b/drivers/usb/core/buffer.c
@@ -66,9 +66,7 @@ int hcd_buffer_create(struct usb_hcd *hcd)
66 char name[16]; 66 char name[16];
67 int i, size; 67 int i, size;
68 68
69 if (!IS_ENABLED(CONFIG_HAS_DMA) || 69 if (hcd->localmem_pool || !hcd_uses_dma(hcd))
70 (!is_device_dma_capable(hcd->self.sysdev) &&
71 !hcd->localmem_pool))
72 return 0; 70 return 0;
73 71
74 for (i = 0; i < HCD_BUFFER_POOLS; i++) { 72 for (i = 0; i < HCD_BUFFER_POOLS; i++) {
@@ -129,8 +127,7 @@ void *hcd_buffer_alloc(
129 return gen_pool_dma_alloc(hcd->localmem_pool, size, dma); 127 return gen_pool_dma_alloc(hcd->localmem_pool, size, dma);
130 128
131 /* some USB hosts just use PIO */ 129 /* some USB hosts just use PIO */
132 if (!IS_ENABLED(CONFIG_HAS_DMA) || 130 if (!hcd_uses_dma(hcd)) {
133 !is_device_dma_capable(bus->sysdev)) {
134 *dma = ~(dma_addr_t) 0; 131 *dma = ~(dma_addr_t) 0;
135 return kmalloc(size, mem_flags); 132 return kmalloc(size, mem_flags);
136 } 133 }
@@ -160,8 +157,7 @@ void hcd_buffer_free(
160 return; 157 return;
161 } 158 }
162 159
163 if (!IS_ENABLED(CONFIG_HAS_DMA) || 160 if (!hcd_uses_dma(hcd)) {
164 !is_device_dma_capable(bus->sysdev)) {
165 kfree(addr); 161 kfree(addr);
166 return; 162 return;
167 } 163 }
diff --git a/drivers/usb/core/devio.c b/drivers/usb/core/devio.c
index b265ab5405f9..9063ede411ae 100644
--- a/drivers/usb/core/devio.c
+++ b/drivers/usb/core/devio.c
@@ -1812,8 +1812,6 @@ static int proc_do_submiturb(struct usb_dev_state *ps, struct usbdevfs_urb *uurb
1812 return 0; 1812 return 0;
1813 1813
1814 error: 1814 error:
1815 if (as && as->usbm)
1816 dec_usb_memory_use_count(as->usbm, &as->usbm->urb_use_count);
1817 kfree(isopkt); 1815 kfree(isopkt);
1818 kfree(dr); 1816 kfree(dr);
1819 if (as) 1817 if (as)
diff --git a/drivers/usb/core/file.c b/drivers/usb/core/file.c
index 65de6f73b672..558890ada0e5 100644
--- a/drivers/usb/core/file.c
+++ b/drivers/usb/core/file.c
@@ -193,9 +193,10 @@ int usb_register_dev(struct usb_interface *intf,
193 intf->minor = minor; 193 intf->minor = minor;
194 break; 194 break;
195 } 195 }
196 up_write(&minor_rwsem); 196 if (intf->minor < 0) {
197 if (intf->minor < 0) 197 up_write(&minor_rwsem);
198 return -EXFULL; 198 return -EXFULL;
199 }
199 200
200 /* create a usb class device for this usb interface */ 201 /* create a usb class device for this usb interface */
201 snprintf(name, sizeof(name), class_driver->name, minor - minor_base); 202 snprintf(name, sizeof(name), class_driver->name, minor - minor_base);
@@ -203,12 +204,11 @@ int usb_register_dev(struct usb_interface *intf,
203 MKDEV(USB_MAJOR, minor), class_driver, 204 MKDEV(USB_MAJOR, minor), class_driver,
204 "%s", kbasename(name)); 205 "%s", kbasename(name));
205 if (IS_ERR(intf->usb_dev)) { 206 if (IS_ERR(intf->usb_dev)) {
206 down_write(&minor_rwsem);
207 usb_minors[minor] = NULL; 207 usb_minors[minor] = NULL;
208 intf->minor = -1; 208 intf->minor = -1;
209 up_write(&minor_rwsem);
210 retval = PTR_ERR(intf->usb_dev); 209 retval = PTR_ERR(intf->usb_dev);
211 } 210 }
211 up_write(&minor_rwsem);
212 return retval; 212 return retval;
213} 213}
214EXPORT_SYMBOL_GPL(usb_register_dev); 214EXPORT_SYMBOL_GPL(usb_register_dev);
@@ -234,12 +234,12 @@ void usb_deregister_dev(struct usb_interface *intf,
234 return; 234 return;
235 235
236 dev_dbg(&intf->dev, "removing %d minor\n", intf->minor); 236 dev_dbg(&intf->dev, "removing %d minor\n", intf->minor);
237 device_destroy(usb_class->class, MKDEV(USB_MAJOR, intf->minor));
237 238
238 down_write(&minor_rwsem); 239 down_write(&minor_rwsem);
239 usb_minors[intf->minor] = NULL; 240 usb_minors[intf->minor] = NULL;
240 up_write(&minor_rwsem); 241 up_write(&minor_rwsem);
241 242
242 device_destroy(usb_class->class, MKDEV(USB_MAJOR, intf->minor));
243 intf->usb_dev = NULL; 243 intf->usb_dev = NULL;
244 intf->minor = -1; 244 intf->minor = -1;
245 destroy_usb_class(); 245 destroy_usb_class();
diff --git a/drivers/usb/core/hcd-pci.c b/drivers/usb/core/hcd-pci.c
index 03432467b05f..7537681355f6 100644
--- a/drivers/usb/core/hcd-pci.c
+++ b/drivers/usb/core/hcd-pci.c
@@ -216,17 +216,18 @@ int usb_hcd_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
216 /* EHCI, OHCI */ 216 /* EHCI, OHCI */
217 hcd->rsrc_start = pci_resource_start(dev, 0); 217 hcd->rsrc_start = pci_resource_start(dev, 0);
218 hcd->rsrc_len = pci_resource_len(dev, 0); 218 hcd->rsrc_len = pci_resource_len(dev, 0);
219 if (!request_mem_region(hcd->rsrc_start, hcd->rsrc_len, 219 if (!devm_request_mem_region(&dev->dev, hcd->rsrc_start,
220 driver->description)) { 220 hcd->rsrc_len, driver->description)) {
221 dev_dbg(&dev->dev, "controller already in use\n"); 221 dev_dbg(&dev->dev, "controller already in use\n");
222 retval = -EBUSY; 222 retval = -EBUSY;
223 goto put_hcd; 223 goto put_hcd;
224 } 224 }
225 hcd->regs = ioremap_nocache(hcd->rsrc_start, hcd->rsrc_len); 225 hcd->regs = devm_ioremap_nocache(&dev->dev, hcd->rsrc_start,
226 hcd->rsrc_len);
226 if (hcd->regs == NULL) { 227 if (hcd->regs == NULL) {
227 dev_dbg(&dev->dev, "error mapping memory\n"); 228 dev_dbg(&dev->dev, "error mapping memory\n");
228 retval = -EFAULT; 229 retval = -EFAULT;
229 goto release_mem_region; 230 goto put_hcd;
230 } 231 }
231 232
232 } else { 233 } else {
@@ -240,8 +241,8 @@ int usb_hcd_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
240 241
241 hcd->rsrc_start = pci_resource_start(dev, region); 242 hcd->rsrc_start = pci_resource_start(dev, region);
242 hcd->rsrc_len = pci_resource_len(dev, region); 243 hcd->rsrc_len = pci_resource_len(dev, region);
243 if (request_region(hcd->rsrc_start, hcd->rsrc_len, 244 if (devm_request_region(&dev->dev, hcd->rsrc_start,
244 driver->description)) 245 hcd->rsrc_len, driver->description))
245 break; 246 break;
246 } 247 }
247 if (region == PCI_ROM_RESOURCE) { 248 if (region == PCI_ROM_RESOURCE) {
@@ -275,20 +276,13 @@ int usb_hcd_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
275 } 276 }
276 277
277 if (retval != 0) 278 if (retval != 0)
278 goto unmap_registers; 279 goto put_hcd;
279 device_wakeup_enable(hcd->self.controller); 280 device_wakeup_enable(hcd->self.controller);
280 281
281 if (pci_dev_run_wake(dev)) 282 if (pci_dev_run_wake(dev))
282 pm_runtime_put_noidle(&dev->dev); 283 pm_runtime_put_noidle(&dev->dev);
283 return retval; 284 return retval;
284 285
285unmap_registers:
286 if (driver->flags & HCD_MEMORY) {
287 iounmap(hcd->regs);
288release_mem_region:
289 release_mem_region(hcd->rsrc_start, hcd->rsrc_len);
290 } else
291 release_region(hcd->rsrc_start, hcd->rsrc_len);
292put_hcd: 286put_hcd:
293 usb_put_hcd(hcd); 287 usb_put_hcd(hcd);
294disable_pci: 288disable_pci:
@@ -347,14 +341,6 @@ void usb_hcd_pci_remove(struct pci_dev *dev)
347 dev_set_drvdata(&dev->dev, NULL); 341 dev_set_drvdata(&dev->dev, NULL);
348 up_read(&companions_rwsem); 342 up_read(&companions_rwsem);
349 } 343 }
350
351 if (hcd->driver->flags & HCD_MEMORY) {
352 iounmap(hcd->regs);
353 release_mem_region(hcd->rsrc_start, hcd->rsrc_len);
354 } else {
355 release_region(hcd->rsrc_start, hcd->rsrc_len);
356 }
357
358 usb_put_hcd(hcd); 344 usb_put_hcd(hcd);
359 pci_disable_device(dev); 345 pci_disable_device(dev);
360} 346}
diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
index 88533938ce19..8592c0344fe8 100644
--- a/drivers/usb/core/hcd.c
+++ b/drivers/usb/core/hcd.c
@@ -103,11 +103,6 @@ static DEFINE_SPINLOCK(hcd_urb_unlink_lock);
103/* wait queue for synchronous unlinks */ 103/* wait queue for synchronous unlinks */
104DECLARE_WAIT_QUEUE_HEAD(usb_kill_urb_queue); 104DECLARE_WAIT_QUEUE_HEAD(usb_kill_urb_queue);
105 105
106static inline int is_root_hub(struct usb_device *udev)
107{
108 return (udev->parent == NULL);
109}
110
111/*-------------------------------------------------------------------------*/ 106/*-------------------------------------------------------------------------*/
112 107
113/* 108/*
@@ -880,101 +875,6 @@ static int usb_rh_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
880} 875}
881 876
882 877
883
884/*
885 * Show & store the current value of authorized_default
886 */
887static ssize_t authorized_default_show(struct device *dev,
888 struct device_attribute *attr, char *buf)
889{
890 struct usb_device *rh_usb_dev = to_usb_device(dev);
891 struct usb_bus *usb_bus = rh_usb_dev->bus;
892 struct usb_hcd *hcd;
893
894 hcd = bus_to_hcd(usb_bus);
895 return snprintf(buf, PAGE_SIZE, "%u\n", hcd->dev_policy);
896}
897
898static ssize_t authorized_default_store(struct device *dev,
899 struct device_attribute *attr,
900 const char *buf, size_t size)
901{
902 ssize_t result;
903 unsigned val;
904 struct usb_device *rh_usb_dev = to_usb_device(dev);
905 struct usb_bus *usb_bus = rh_usb_dev->bus;
906 struct usb_hcd *hcd;
907
908 hcd = bus_to_hcd(usb_bus);
909 result = sscanf(buf, "%u\n", &val);
910 if (result == 1) {
911 hcd->dev_policy = val <= USB_DEVICE_AUTHORIZE_INTERNAL ?
912 val : USB_DEVICE_AUTHORIZE_ALL;
913 result = size;
914 } else {
915 result = -EINVAL;
916 }
917 return result;
918}
919static DEVICE_ATTR_RW(authorized_default);
920
921/*
922 * interface_authorized_default_show - show default authorization status
923 * for USB interfaces
924 *
925 * note: interface_authorized_default is the default value
926 * for initializing the authorized attribute of interfaces
927 */
928static ssize_t interface_authorized_default_show(struct device *dev,
929 struct device_attribute *attr, char *buf)
930{
931 struct usb_device *usb_dev = to_usb_device(dev);
932 struct usb_hcd *hcd = bus_to_hcd(usb_dev->bus);
933
934 return sprintf(buf, "%u\n", !!HCD_INTF_AUTHORIZED(hcd));
935}
936
937/*
938 * interface_authorized_default_store - store default authorization status
939 * for USB interfaces
940 *
941 * note: interface_authorized_default is the default value
942 * for initializing the authorized attribute of interfaces
943 */
944static ssize_t interface_authorized_default_store(struct device *dev,
945 struct device_attribute *attr, const char *buf, size_t count)
946{
947 struct usb_device *usb_dev = to_usb_device(dev);
948 struct usb_hcd *hcd = bus_to_hcd(usb_dev->bus);
949 int rc = count;
950 bool val;
951
952 if (strtobool(buf, &val) != 0)
953 return -EINVAL;
954
955 if (val)
956 set_bit(HCD_FLAG_INTF_AUTHORIZED, &hcd->flags);
957 else
958 clear_bit(HCD_FLAG_INTF_AUTHORIZED, &hcd->flags);
959
960 return rc;
961}
962static DEVICE_ATTR_RW(interface_authorized_default);
963
964/* Group all the USB bus attributes */
965static struct attribute *usb_bus_attrs[] = {
966 &dev_attr_authorized_default.attr,
967 &dev_attr_interface_authorized_default.attr,
968 NULL,
969};
970
971static const struct attribute_group usb_bus_attr_group = {
972 .name = NULL, /* we want them in the same directory */
973 .attrs = usb_bus_attrs,
974};
975
976
977
978/*-------------------------------------------------------------------------*/ 878/*-------------------------------------------------------------------------*/
979 879
980/** 880/**
@@ -1512,7 +1412,7 @@ int usb_hcd_map_urb_for_dma(struct usb_hcd *hcd, struct urb *urb,
1512 if (usb_endpoint_xfer_control(&urb->ep->desc)) { 1412 if (usb_endpoint_xfer_control(&urb->ep->desc)) {
1513 if (hcd->self.uses_pio_for_control) 1413 if (hcd->self.uses_pio_for_control)
1514 return ret; 1414 return ret;
1515 if (IS_ENABLED(CONFIG_HAS_DMA) && hcd->self.uses_dma) { 1415 if (hcd_uses_dma(hcd)) {
1516 if (is_vmalloc_addr(urb->setup_packet)) { 1416 if (is_vmalloc_addr(urb->setup_packet)) {
1517 WARN_ONCE(1, "setup packet is not dma capable\n"); 1417 WARN_ONCE(1, "setup packet is not dma capable\n");
1518 return -EAGAIN; 1418 return -EAGAIN;
@@ -1546,7 +1446,7 @@ int usb_hcd_map_urb_for_dma(struct usb_hcd *hcd, struct urb *urb,
1546 dir = usb_urb_dir_in(urb) ? DMA_FROM_DEVICE : DMA_TO_DEVICE; 1446 dir = usb_urb_dir_in(urb) ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
1547 if (urb->transfer_buffer_length != 0 1447 if (urb->transfer_buffer_length != 0
1548 && !(urb->transfer_flags & URB_NO_TRANSFER_DMA_MAP)) { 1448 && !(urb->transfer_flags & URB_NO_TRANSFER_DMA_MAP)) {
1549 if (IS_ENABLED(CONFIG_HAS_DMA) && hcd->self.uses_dma) { 1449 if (hcd_uses_dma(hcd)) {
1550 if (urb->num_sgs) { 1450 if (urb->num_sgs) {
1551 int n; 1451 int n;
1552 1452
@@ -2894,32 +2794,11 @@ int usb_add_hcd(struct usb_hcd *hcd,
2894 if (retval != 0) 2794 if (retval != 0)
2895 goto err_register_root_hub; 2795 goto err_register_root_hub;
2896 2796
2897 retval = sysfs_create_group(&rhdev->dev.kobj, &usb_bus_attr_group);
2898 if (retval < 0) {
2899 printk(KERN_ERR "Cannot register USB bus sysfs attributes: %d\n",
2900 retval);
2901 goto error_create_attr_group;
2902 }
2903 if (hcd->uses_new_polling && HCD_POLL_RH(hcd)) 2797 if (hcd->uses_new_polling && HCD_POLL_RH(hcd))
2904 usb_hcd_poll_rh_status(hcd); 2798 usb_hcd_poll_rh_status(hcd);
2905 2799
2906 return retval; 2800 return retval;
2907 2801
2908error_create_attr_group:
2909 clear_bit(HCD_FLAG_RH_RUNNING, &hcd->flags);
2910 if (HC_IS_RUNNING(hcd->state))
2911 hcd->state = HC_STATE_QUIESCING;
2912 spin_lock_irq(&hcd_root_hub_lock);
2913 hcd->rh_registered = 0;
2914 spin_unlock_irq(&hcd_root_hub_lock);
2915
2916#ifdef CONFIG_PM
2917 cancel_work_sync(&hcd->wakeup_work);
2918#endif
2919 cancel_work_sync(&hcd->died_work);
2920 mutex_lock(&usb_bus_idr_lock);
2921 usb_disconnect(&rhdev); /* Sets rhdev to NULL */
2922 mutex_unlock(&usb_bus_idr_lock);
2923err_register_root_hub: 2802err_register_root_hub:
2924 hcd->rh_pollable = 0; 2803 hcd->rh_pollable = 0;
2925 clear_bit(HCD_FLAG_POLL_RH, &hcd->flags); 2804 clear_bit(HCD_FLAG_POLL_RH, &hcd->flags);
@@ -2963,8 +2842,6 @@ void usb_remove_hcd(struct usb_hcd *hcd)
2963 dev_info(hcd->self.controller, "remove, state %x\n", hcd->state); 2842 dev_info(hcd->self.controller, "remove, state %x\n", hcd->state);
2964 2843
2965 usb_get_dev(rhdev); 2844 usb_get_dev(rhdev);
2966 sysfs_remove_group(&rhdev->dev.kobj, &usb_bus_attr_group);
2967
2968 clear_bit(HCD_FLAG_RH_RUNNING, &hcd->flags); 2845 clear_bit(HCD_FLAG_RH_RUNNING, &hcd->flags);
2969 if (HC_IS_RUNNING (hcd->state)) 2846 if (HC_IS_RUNNING (hcd->state))
2970 hcd->state = HC_STATE_QUIESCING; 2847 hcd->state = HC_STATE_QUIESCING;
@@ -3052,8 +2929,8 @@ int usb_hcd_setup_local_mem(struct usb_hcd *hcd, phys_addr_t phys_addr,
3052 2929
3053 local_mem = devm_memremap(hcd->self.sysdev, phys_addr, 2930 local_mem = devm_memremap(hcd->self.sysdev, phys_addr,
3054 size, MEMREMAP_WC); 2931 size, MEMREMAP_WC);
3055 if (!local_mem) 2932 if (IS_ERR(local_mem))
3056 return -ENOMEM; 2933 return PTR_ERR(local_mem);
3057 2934
3058 /* 2935 /*
3059 * Here we pass a dma_addr_t but the arg type is a phys_addr_t. 2936 * Here we pass a dma_addr_t but the arg type is a phys_addr_t.
diff --git a/drivers/usb/core/message.c b/drivers/usb/core/message.c
index e844bb7b5676..5adf489428aa 100644
--- a/drivers/usb/core/message.c
+++ b/drivers/usb/core/message.c
@@ -2218,14 +2218,14 @@ int cdc_parse_cdc_header(struct usb_cdc_parsed_header *hdr,
2218 (struct usb_cdc_dmm_desc *)buffer; 2218 (struct usb_cdc_dmm_desc *)buffer;
2219 break; 2219 break;
2220 case USB_CDC_MDLM_TYPE: 2220 case USB_CDC_MDLM_TYPE:
2221 if (elength < sizeof(struct usb_cdc_mdlm_desc *)) 2221 if (elength < sizeof(struct usb_cdc_mdlm_desc))
2222 goto next_desc; 2222 goto next_desc;
2223 if (desc) 2223 if (desc)
2224 return -EINVAL; 2224 return -EINVAL;
2225 desc = (struct usb_cdc_mdlm_desc *)buffer; 2225 desc = (struct usb_cdc_mdlm_desc *)buffer;
2226 break; 2226 break;
2227 case USB_CDC_MDLM_DETAIL_TYPE: 2227 case USB_CDC_MDLM_DETAIL_TYPE:
2228 if (elength < sizeof(struct usb_cdc_mdlm_detail_desc *)) 2228 if (elength < sizeof(struct usb_cdc_mdlm_detail_desc))
2229 goto next_desc; 2229 goto next_desc;
2230 if (detail) 2230 if (detail)
2231 return -EINVAL; 2231 return -EINVAL;
diff --git a/drivers/usb/core/sysfs.c b/drivers/usb/core/sysfs.c
index 7e88fdfe3cf5..f19694e69f5c 100644
--- a/drivers/usb/core/sysfs.c
+++ b/drivers/usb/core/sysfs.c
@@ -15,6 +15,7 @@
15#include <linux/kernel.h> 15#include <linux/kernel.h>
16#include <linux/string.h> 16#include <linux/string.h>
17#include <linux/usb.h> 17#include <linux/usb.h>
18#include <linux/usb/hcd.h>
18#include <linux/usb/quirks.h> 19#include <linux/usb/quirks.h>
19#include <linux/of.h> 20#include <linux/of.h>
20#include "usb.h" 21#include "usb.h"
@@ -922,6 +923,116 @@ static struct bin_attribute dev_bin_attr_descriptors = {
922 .size = 18 + 65535, /* dev descr + max-size raw descriptor */ 923 .size = 18 + 65535, /* dev descr + max-size raw descriptor */
923}; 924};
924 925
926/*
927 * Show & store the current value of authorized_default
928 */
929static ssize_t authorized_default_show(struct device *dev,
930 struct device_attribute *attr, char *buf)
931{
932 struct usb_device *rh_usb_dev = to_usb_device(dev);
933 struct usb_bus *usb_bus = rh_usb_dev->bus;
934 struct usb_hcd *hcd;
935
936 hcd = bus_to_hcd(usb_bus);
937 return snprintf(buf, PAGE_SIZE, "%u\n", hcd->dev_policy);
938}
939
940static ssize_t authorized_default_store(struct device *dev,
941 struct device_attribute *attr,
942 const char *buf, size_t size)
943{
944 ssize_t result;
945 unsigned int val;
946 struct usb_device *rh_usb_dev = to_usb_device(dev);
947 struct usb_bus *usb_bus = rh_usb_dev->bus;
948 struct usb_hcd *hcd;
949
950 hcd = bus_to_hcd(usb_bus);
951 result = sscanf(buf, "%u\n", &val);
952 if (result == 1) {
953 hcd->dev_policy = val <= USB_DEVICE_AUTHORIZE_INTERNAL ?
954 val : USB_DEVICE_AUTHORIZE_ALL;
955 result = size;
956 } else {
957 result = -EINVAL;
958 }
959 return result;
960}
961static DEVICE_ATTR_RW(authorized_default);
962
963/*
964 * interface_authorized_default_show - show default authorization status
965 * for USB interfaces
966 *
967 * note: interface_authorized_default is the default value
968 * for initializing the authorized attribute of interfaces
969 */
970static ssize_t interface_authorized_default_show(struct device *dev,
971 struct device_attribute *attr, char *buf)
972{
973 struct usb_device *usb_dev = to_usb_device(dev);
974 struct usb_hcd *hcd = bus_to_hcd(usb_dev->bus);
975
976 return sprintf(buf, "%u\n", !!HCD_INTF_AUTHORIZED(hcd));
977}
978
979/*
980 * interface_authorized_default_store - store default authorization status
981 * for USB interfaces
982 *
983 * note: interface_authorized_default is the default value
984 * for initializing the authorized attribute of interfaces
985 */
986static ssize_t interface_authorized_default_store(struct device *dev,
987 struct device_attribute *attr, const char *buf, size_t count)
988{
989 struct usb_device *usb_dev = to_usb_device(dev);
990 struct usb_hcd *hcd = bus_to_hcd(usb_dev->bus);
991 int rc = count;
992 bool val;
993
994 if (strtobool(buf, &val) != 0)
995 return -EINVAL;
996
997 if (val)
998 set_bit(HCD_FLAG_INTF_AUTHORIZED, &hcd->flags);
999 else
1000 clear_bit(HCD_FLAG_INTF_AUTHORIZED, &hcd->flags);
1001
1002 return rc;
1003}
1004static DEVICE_ATTR_RW(interface_authorized_default);
1005
1006/* Group all the USB bus attributes */
1007static struct attribute *usb_bus_attrs[] = {
1008 &dev_attr_authorized_default.attr,
1009 &dev_attr_interface_authorized_default.attr,
1010 NULL,
1011};
1012
1013static const struct attribute_group usb_bus_attr_group = {
1014 .name = NULL, /* we want them in the same directory */
1015 .attrs = usb_bus_attrs,
1016};
1017
1018
1019static int add_default_authorized_attributes(struct device *dev)
1020{
1021 int rc = 0;
1022
1023 if (is_usb_device(dev))
1024 rc = sysfs_create_group(&dev->kobj, &usb_bus_attr_group);
1025
1026 return rc;
1027}
1028
1029static void remove_default_authorized_attributes(struct device *dev)
1030{
1031 if (is_usb_device(dev)) {
1032 sysfs_remove_group(&dev->kobj, &usb_bus_attr_group);
1033 }
1034}
1035
925int usb_create_sysfs_dev_files(struct usb_device *udev) 1036int usb_create_sysfs_dev_files(struct usb_device *udev)
926{ 1037{
927 struct device *dev = &udev->dev; 1038 struct device *dev = &udev->dev;
@@ -938,7 +1049,14 @@ int usb_create_sysfs_dev_files(struct usb_device *udev)
938 retval = add_power_attributes(dev); 1049 retval = add_power_attributes(dev);
939 if (retval) 1050 if (retval)
940 goto error; 1051 goto error;
1052
1053 if (is_root_hub(udev)) {
1054 retval = add_default_authorized_attributes(dev);
1055 if (retval)
1056 goto error;
1057 }
941 return retval; 1058 return retval;
1059
942error: 1060error:
943 usb_remove_sysfs_dev_files(udev); 1061 usb_remove_sysfs_dev_files(udev);
944 return retval; 1062 return retval;
@@ -948,6 +1066,9 @@ void usb_remove_sysfs_dev_files(struct usb_device *udev)
948{ 1066{
949 struct device *dev = &udev->dev; 1067 struct device *dev = &udev->dev;
950 1068
1069 if (is_root_hub(udev))
1070 remove_default_authorized_attributes(dev);
1071
951 remove_power_attributes(dev); 1072 remove_power_attributes(dev);
952 remove_persist_attributes(dev); 1073 remove_persist_attributes(dev);
953 device_remove_bin_file(dev, &dev_bin_attr_descriptors); 1074 device_remove_bin_file(dev, &dev_bin_attr_descriptors);
diff --git a/drivers/usb/core/usb.h b/drivers/usb/core/usb.h
index bd8d01f85a13..0c9fde5ad052 100644
--- a/drivers/usb/core/usb.h
+++ b/drivers/usb/core/usb.h
@@ -153,6 +153,11 @@ static inline int is_usb_port(const struct device *dev)
153 return dev->type == &usb_port_device_type; 153 return dev->type == &usb_port_device_type;
154} 154}
155 155
156static inline int is_root_hub(struct usb_device *udev)
157{
158 return (udev->parent == NULL);
159}
160
156/* Do the same for device drivers and interface drivers. */ 161/* Do the same for device drivers and interface drivers. */
157 162
158static inline int is_usb_device_driver(struct device_driver *drv) 163static inline int is_usb_device_driver(struct device_driver *drv)
diff --git a/drivers/usb/dwc2/hcd.c b/drivers/usb/dwc2/hcd.c
index ee144ff8af5b..111787a137ee 100644
--- a/drivers/usb/dwc2/hcd.c
+++ b/drivers/usb/dwc2/hcd.c
@@ -4608,7 +4608,7 @@ static int _dwc2_hcd_urb_enqueue(struct usb_hcd *hcd, struct urb *urb,
4608 4608
4609 buf = urb->transfer_buffer; 4609 buf = urb->transfer_buffer;
4610 4610
4611 if (hcd->self.uses_dma) { 4611 if (hcd_uses_dma(hcd)) {
4612 if (!buf && (urb->transfer_dma & 3)) { 4612 if (!buf && (urb->transfer_dma & 3)) {
4613 dev_err(hsotg->dev, 4613 dev_err(hsotg->dev,
4614 "%s: unaligned transfer with no transfer_buffer", 4614 "%s: unaligned transfer with no transfer_buffer",
diff --git a/drivers/usb/gadget/composite.c b/drivers/usb/gadget/composite.c
index 9118b42c70b6..76883ff4f5bb 100644
--- a/drivers/usb/gadget/composite.c
+++ b/drivers/usb/gadget/composite.c
@@ -1976,6 +1976,7 @@ void composite_disconnect(struct usb_gadget *gadget)
1976 * disconnect callbacks? 1976 * disconnect callbacks?
1977 */ 1977 */
1978 spin_lock_irqsave(&cdev->lock, flags); 1978 spin_lock_irqsave(&cdev->lock, flags);
1979 cdev->suspended = 0;
1979 if (cdev->config) 1980 if (cdev->config)
1980 reset_config(cdev); 1981 reset_config(cdev);
1981 if (cdev->driver->disconnect) 1982 if (cdev->driver->disconnect)
diff --git a/drivers/usb/gadget/function/f_mass_storage.c b/drivers/usb/gadget/function/f_mass_storage.c
index 29cc5693e05c..7c96c4665178 100644
--- a/drivers/usb/gadget/function/f_mass_storage.c
+++ b/drivers/usb/gadget/function/f_mass_storage.c
@@ -261,7 +261,7 @@ struct fsg_common;
261struct fsg_common { 261struct fsg_common {
262 struct usb_gadget *gadget; 262 struct usb_gadget *gadget;
263 struct usb_composite_dev *cdev; 263 struct usb_composite_dev *cdev;
264 struct fsg_dev *fsg, *new_fsg; 264 struct fsg_dev *fsg;
265 wait_queue_head_t io_wait; 265 wait_queue_head_t io_wait;
266 wait_queue_head_t fsg_wait; 266 wait_queue_head_t fsg_wait;
267 267
@@ -290,6 +290,7 @@ struct fsg_common {
290 unsigned int bulk_out_maxpacket; 290 unsigned int bulk_out_maxpacket;
291 enum fsg_state state; /* For exception handling */ 291 enum fsg_state state; /* For exception handling */
292 unsigned int exception_req_tag; 292 unsigned int exception_req_tag;
293 void *exception_arg;
293 294
294 enum data_direction data_dir; 295 enum data_direction data_dir;
295 u32 data_size; 296 u32 data_size;
@@ -391,7 +392,8 @@ static int fsg_set_halt(struct fsg_dev *fsg, struct usb_ep *ep)
391 392
392/* These routines may be called in process context or in_irq */ 393/* These routines may be called in process context or in_irq */
393 394
394static void raise_exception(struct fsg_common *common, enum fsg_state new_state) 395static void __raise_exception(struct fsg_common *common, enum fsg_state new_state,
396 void *arg)
395{ 397{
396 unsigned long flags; 398 unsigned long flags;
397 399
@@ -404,6 +406,7 @@ static void raise_exception(struct fsg_common *common, enum fsg_state new_state)
404 if (common->state <= new_state) { 406 if (common->state <= new_state) {
405 common->exception_req_tag = common->ep0_req_tag; 407 common->exception_req_tag = common->ep0_req_tag;
406 common->state = new_state; 408 common->state = new_state;
409 common->exception_arg = arg;
407 if (common->thread_task) 410 if (common->thread_task)
408 send_sig_info(SIGUSR1, SEND_SIG_PRIV, 411 send_sig_info(SIGUSR1, SEND_SIG_PRIV,
409 common->thread_task); 412 common->thread_task);
@@ -411,6 +414,10 @@ static void raise_exception(struct fsg_common *common, enum fsg_state new_state)
411 spin_unlock_irqrestore(&common->lock, flags); 414 spin_unlock_irqrestore(&common->lock, flags);
412} 415}
413 416
417static void raise_exception(struct fsg_common *common, enum fsg_state new_state)
418{
419 __raise_exception(common, new_state, NULL);
420}
414 421
415/*-------------------------------------------------------------------------*/ 422/*-------------------------------------------------------------------------*/
416 423
@@ -2285,16 +2292,16 @@ reset:
2285static int fsg_set_alt(struct usb_function *f, unsigned intf, unsigned alt) 2292static int fsg_set_alt(struct usb_function *f, unsigned intf, unsigned alt)
2286{ 2293{
2287 struct fsg_dev *fsg = fsg_from_func(f); 2294 struct fsg_dev *fsg = fsg_from_func(f);
2288 fsg->common->new_fsg = fsg; 2295
2289 raise_exception(fsg->common, FSG_STATE_CONFIG_CHANGE); 2296 __raise_exception(fsg->common, FSG_STATE_CONFIG_CHANGE, fsg);
2290 return USB_GADGET_DELAYED_STATUS; 2297 return USB_GADGET_DELAYED_STATUS;
2291} 2298}
2292 2299
2293static void fsg_disable(struct usb_function *f) 2300static void fsg_disable(struct usb_function *f)
2294{ 2301{
2295 struct fsg_dev *fsg = fsg_from_func(f); 2302 struct fsg_dev *fsg = fsg_from_func(f);
2296 fsg->common->new_fsg = NULL; 2303
2297 raise_exception(fsg->common, FSG_STATE_CONFIG_CHANGE); 2304 __raise_exception(fsg->common, FSG_STATE_CONFIG_CHANGE, NULL);
2298} 2305}
2299 2306
2300 2307
@@ -2307,6 +2314,7 @@ static void handle_exception(struct fsg_common *common)
2307 enum fsg_state old_state; 2314 enum fsg_state old_state;
2308 struct fsg_lun *curlun; 2315 struct fsg_lun *curlun;
2309 unsigned int exception_req_tag; 2316 unsigned int exception_req_tag;
2317 struct fsg_dev *new_fsg;
2310 2318
2311 /* 2319 /*
2312 * Clear the existing signals. Anything but SIGUSR1 is converted 2320 * Clear the existing signals. Anything but SIGUSR1 is converted
@@ -2360,6 +2368,7 @@ static void handle_exception(struct fsg_common *common)
2360 common->next_buffhd_to_fill = &common->buffhds[0]; 2368 common->next_buffhd_to_fill = &common->buffhds[0];
2361 common->next_buffhd_to_drain = &common->buffhds[0]; 2369 common->next_buffhd_to_drain = &common->buffhds[0];
2362 exception_req_tag = common->exception_req_tag; 2370 exception_req_tag = common->exception_req_tag;
2371 new_fsg = common->exception_arg;
2363 old_state = common->state; 2372 old_state = common->state;
2364 common->state = FSG_STATE_NORMAL; 2373 common->state = FSG_STATE_NORMAL;
2365 2374
@@ -2413,8 +2422,8 @@ static void handle_exception(struct fsg_common *common)
2413 break; 2422 break;
2414 2423
2415 case FSG_STATE_CONFIG_CHANGE: 2424 case FSG_STATE_CONFIG_CHANGE:
2416 do_set_interface(common, common->new_fsg); 2425 do_set_interface(common, new_fsg);
2417 if (common->new_fsg) 2426 if (new_fsg)
2418 usb_composite_setup_continue(common->cdev); 2427 usb_composite_setup_continue(common->cdev);
2419 break; 2428 break;
2420 2429
@@ -2989,8 +2998,7 @@ static void fsg_unbind(struct usb_configuration *c, struct usb_function *f)
2989 2998
2990 DBG(fsg, "unbind\n"); 2999 DBG(fsg, "unbind\n");
2991 if (fsg->common->fsg == fsg) { 3000 if (fsg->common->fsg == fsg) {
2992 fsg->common->new_fsg = NULL; 3001 __raise_exception(fsg->common, FSG_STATE_CONFIG_CHANGE, NULL);
2993 raise_exception(fsg->common, FSG_STATE_CONFIG_CHANGE);
2994 /* FIXME: make interruptible or killable somehow? */ 3002 /* FIXME: make interruptible or killable somehow? */
2995 wait_event(common->fsg_wait, common->fsg != fsg); 3003 wait_event(common->fsg_wait, common->fsg != fsg);
2996 } 3004 }
diff --git a/drivers/usb/gadget/udc/lpc32xx_udc.c b/drivers/usb/gadget/udc/lpc32xx_udc.c
index 5f1b14f3e5a0..bb6af6b5ac97 100644
--- a/drivers/usb/gadget/udc/lpc32xx_udc.c
+++ b/drivers/usb/gadget/udc/lpc32xx_udc.c
@@ -2265,7 +2265,7 @@ static void udc_handle_ep0_setup(struct lpc32xx_udc *udc)
2265 default: 2265 default:
2266 break; 2266 break;
2267 } 2267 }
2268 2268 break;
2269 2269
2270 case USB_REQ_SET_ADDRESS: 2270 case USB_REQ_SET_ADDRESS:
2271 if (reqtype == (USB_TYPE_STANDARD | USB_RECIP_DEVICE)) { 2271 if (reqtype == (USB_TYPE_STANDARD | USB_RECIP_DEVICE)) {
diff --git a/drivers/usb/gadget/udc/renesas_usb3.c b/drivers/usb/gadget/udc/renesas_usb3.c
index 87062d22134d..1f4c3fbd1df8 100644
--- a/drivers/usb/gadget/udc/renesas_usb3.c
+++ b/drivers/usb/gadget/udc/renesas_usb3.c
@@ -19,6 +19,7 @@
19#include <linux/pm_runtime.h> 19#include <linux/pm_runtime.h>
20#include <linux/sizes.h> 20#include <linux/sizes.h>
21#include <linux/slab.h> 21#include <linux/slab.h>
22#include <linux/string.h>
22#include <linux/sys_soc.h> 23#include <linux/sys_soc.h>
23#include <linux/uaccess.h> 24#include <linux/uaccess.h>
24#include <linux/usb/ch9.h> 25#include <linux/usb/ch9.h>
@@ -2450,9 +2451,9 @@ static ssize_t role_store(struct device *dev, struct device_attribute *attr,
2450 if (usb3->forced_b_device) 2451 if (usb3->forced_b_device)
2451 return -EBUSY; 2452 return -EBUSY;
2452 2453
2453 if (!strncmp(buf, "host", strlen("host"))) 2454 if (sysfs_streq(buf, "host"))
2454 new_mode_is_host = true; 2455 new_mode_is_host = true;
2455 else if (!strncmp(buf, "peripheral", strlen("peripheral"))) 2456 else if (sysfs_streq(buf, "peripheral"))
2456 new_mode_is_host = false; 2457 new_mode_is_host = false;
2457 else 2458 else
2458 return -EINVAL; 2459 return -EINVAL;
diff --git a/drivers/usb/host/ehci-pci.c b/drivers/usb/host/ehci-pci.c
index fe9422d3bcdc..b0882c13a1d1 100644
--- a/drivers/usb/host/ehci-pci.c
+++ b/drivers/usb/host/ehci-pci.c
@@ -149,7 +149,7 @@ static int ehci_pci_setup(struct usb_hcd *hcd)
149 break; 149 break;
150 case PCI_VENDOR_ID_AMD: 150 case PCI_VENDOR_ID_AMD:
151 /* AMD PLL quirk */ 151 /* AMD PLL quirk */
152 if (usb_amd_find_chipset_info()) 152 if (usb_amd_quirk_pll_check())
153 ehci->amd_pll_fix = 1; 153 ehci->amd_pll_fix = 1;
154 /* AMD8111 EHCI doesn't work, according to AMD errata */ 154 /* AMD8111 EHCI doesn't work, according to AMD errata */
155 if (pdev->device == 0x7463) { 155 if (pdev->device == 0x7463) {
@@ -186,7 +186,7 @@ static int ehci_pci_setup(struct usb_hcd *hcd)
186 break; 186 break;
187 case PCI_VENDOR_ID_ATI: 187 case PCI_VENDOR_ID_ATI:
188 /* AMD PLL quirk */ 188 /* AMD PLL quirk */
189 if (usb_amd_find_chipset_info()) 189 if (usb_amd_quirk_pll_check())
190 ehci->amd_pll_fix = 1; 190 ehci->amd_pll_fix = 1;
191 191
192 /* 192 /*
diff --git a/drivers/usb/host/fotg210-hcd.c b/drivers/usb/host/fotg210-hcd.c
index 77cc36efae95..0dbfa5c10703 100644
--- a/drivers/usb/host/fotg210-hcd.c
+++ b/drivers/usb/host/fotg210-hcd.c
@@ -1629,6 +1629,10 @@ static int fotg210_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
1629 /* see what we found out */ 1629 /* see what we found out */
1630 temp = check_reset_complete(fotg210, wIndex, status_reg, 1630 temp = check_reset_complete(fotg210, wIndex, status_reg,
1631 fotg210_readl(fotg210, status_reg)); 1631 fotg210_readl(fotg210, status_reg));
1632
1633 /* restart schedule */
1634 fotg210->command |= CMD_RUN;
1635 fotg210_writel(fotg210, fotg210->command, &fotg210->regs->command);
1632 } 1636 }
1633 1637
1634 if (!(temp & (PORT_RESUME|PORT_RESET))) { 1638 if (!(temp & (PORT_RESUME|PORT_RESET))) {
diff --git a/drivers/usb/host/hwa-hc.c b/drivers/usb/host/hwa-hc.c
index 09a8ebd95588..6968b9f2b76b 100644
--- a/drivers/usb/host/hwa-hc.c
+++ b/drivers/usb/host/hwa-hc.c
@@ -159,7 +159,7 @@ out:
159 return result; 159 return result;
160 160
161error_set_cluster_id: 161error_set_cluster_id:
162 wusb_cluster_id_put(wusbhc->cluster_id); 162 wusb_cluster_id_put(addr);
163error_cluster_id_get: 163error_cluster_id_get:
164 goto out; 164 goto out;
165 165
diff --git a/drivers/usb/host/ohci-hcd.c b/drivers/usb/host/ohci-hcd.c
index b457fdaff297..1fe3deec35cf 100644
--- a/drivers/usb/host/ohci-hcd.c
+++ b/drivers/usb/host/ohci-hcd.c
@@ -419,8 +419,7 @@ static void ohci_usb_reset (struct ohci_hcd *ohci)
419 * other cases where the next software may expect clean state from the 419 * other cases where the next software may expect clean state from the
420 * "firmware". this is bus-neutral, unlike shutdown() methods. 420 * "firmware". this is bus-neutral, unlike shutdown() methods.
421 */ 421 */
422static void 422static void _ohci_shutdown(struct usb_hcd *hcd)
423ohci_shutdown (struct usb_hcd *hcd)
424{ 423{
425 struct ohci_hcd *ohci; 424 struct ohci_hcd *ohci;
426 425
@@ -436,6 +435,16 @@ ohci_shutdown (struct usb_hcd *hcd)
436 ohci->rh_state = OHCI_RH_HALTED; 435 ohci->rh_state = OHCI_RH_HALTED;
437} 436}
438 437
438static void ohci_shutdown(struct usb_hcd *hcd)
439{
440 struct ohci_hcd *ohci = hcd_to_ohci(hcd);
441 unsigned long flags;
442
443 spin_lock_irqsave(&ohci->lock, flags);
444 _ohci_shutdown(hcd);
445 spin_unlock_irqrestore(&ohci->lock, flags);
446}
447
439/*-------------------------------------------------------------------------* 448/*-------------------------------------------------------------------------*
440 * HC functions 449 * HC functions
441 *-------------------------------------------------------------------------*/ 450 *-------------------------------------------------------------------------*/
@@ -760,7 +769,7 @@ static void io_watchdog_func(struct timer_list *t)
760 died: 769 died:
761 usb_hc_died(ohci_to_hcd(ohci)); 770 usb_hc_died(ohci_to_hcd(ohci));
762 ohci_dump(ohci); 771 ohci_dump(ohci);
763 ohci_shutdown(ohci_to_hcd(ohci)); 772 _ohci_shutdown(ohci_to_hcd(ohci));
764 goto done; 773 goto done;
765 } else { 774 } else {
766 /* No write back because the done queue was empty */ 775 /* No write back because the done queue was empty */
diff --git a/drivers/usb/host/ohci-pci.c b/drivers/usb/host/ohci-pci.c
index a033f7d855e0..f4e13a3fddee 100644
--- a/drivers/usb/host/ohci-pci.c
+++ b/drivers/usb/host/ohci-pci.c
@@ -152,7 +152,7 @@ static int ohci_quirk_amd700(struct usb_hcd *hcd)
152{ 152{
153 struct ohci_hcd *ohci = hcd_to_ohci(hcd); 153 struct ohci_hcd *ohci = hcd_to_ohci(hcd);
154 154
155 if (usb_amd_find_chipset_info()) 155 if (usb_amd_quirk_pll_check())
156 ohci->flags |= OHCI_QUIRK_AMD_PLL; 156 ohci->flags |= OHCI_QUIRK_AMD_PLL;
157 157
158 /* SB800 needs pre-fetch fix */ 158 /* SB800 needs pre-fetch fix */
diff --git a/drivers/usb/host/pci-quirks.c b/drivers/usb/host/pci-quirks.c
index 3ce71cbfbb58..f6d04491df60 100644
--- a/drivers/usb/host/pci-quirks.c
+++ b/drivers/usb/host/pci-quirks.c
@@ -132,7 +132,7 @@ static struct amd_chipset_info {
132 struct amd_chipset_type sb_type; 132 struct amd_chipset_type sb_type;
133 int isoc_reqs; 133 int isoc_reqs;
134 int probe_count; 134 int probe_count;
135 int probe_result; 135 bool need_pll_quirk;
136} amd_chipset; 136} amd_chipset;
137 137
138static DEFINE_SPINLOCK(amd_lock); 138static DEFINE_SPINLOCK(amd_lock);
@@ -201,11 +201,11 @@ void sb800_prefetch(struct device *dev, int on)
201} 201}
202EXPORT_SYMBOL_GPL(sb800_prefetch); 202EXPORT_SYMBOL_GPL(sb800_prefetch);
203 203
204int usb_amd_find_chipset_info(void) 204static void usb_amd_find_chipset_info(void)
205{ 205{
206 unsigned long flags; 206 unsigned long flags;
207 struct amd_chipset_info info; 207 struct amd_chipset_info info;
208 int ret; 208 info.need_pll_quirk = 0;
209 209
210 spin_lock_irqsave(&amd_lock, flags); 210 spin_lock_irqsave(&amd_lock, flags);
211 211
@@ -213,27 +213,34 @@ int usb_amd_find_chipset_info(void)
213 if (amd_chipset.probe_count > 0) { 213 if (amd_chipset.probe_count > 0) {
214 amd_chipset.probe_count++; 214 amd_chipset.probe_count++;
215 spin_unlock_irqrestore(&amd_lock, flags); 215 spin_unlock_irqrestore(&amd_lock, flags);
216 return amd_chipset.probe_result; 216 return;
217 } 217 }
218 memset(&info, 0, sizeof(info)); 218 memset(&info, 0, sizeof(info));
219 spin_unlock_irqrestore(&amd_lock, flags); 219 spin_unlock_irqrestore(&amd_lock, flags);
220 220
221 if (!amd_chipset_sb_type_init(&info)) { 221 if (!amd_chipset_sb_type_init(&info)) {
222 ret = 0;
223 goto commit; 222 goto commit;
224 } 223 }
225 224
226 /* Below chipset generations needn't enable AMD PLL quirk */ 225 switch (info.sb_type.gen) {
227 if (info.sb_type.gen == AMD_CHIPSET_UNKNOWN || 226 case AMD_CHIPSET_SB700:
228 info.sb_type.gen == AMD_CHIPSET_SB600 || 227 info.need_pll_quirk = info.sb_type.rev <= 0x3B;
229 info.sb_type.gen == AMD_CHIPSET_YANGTZE || 228 break;
230 (info.sb_type.gen == AMD_CHIPSET_SB700 && 229 case AMD_CHIPSET_SB800:
231 info.sb_type.rev > 0x3b)) { 230 case AMD_CHIPSET_HUDSON2:
231 case AMD_CHIPSET_BOLTON:
232 info.need_pll_quirk = 1;
233 break;
234 default:
235 info.need_pll_quirk = 0;
236 break;
237 }
238
239 if (!info.need_pll_quirk) {
232 if (info.smbus_dev) { 240 if (info.smbus_dev) {
233 pci_dev_put(info.smbus_dev); 241 pci_dev_put(info.smbus_dev);
234 info.smbus_dev = NULL; 242 info.smbus_dev = NULL;
235 } 243 }
236 ret = 0;
237 goto commit; 244 goto commit;
238 } 245 }
239 246
@@ -252,7 +259,6 @@ int usb_amd_find_chipset_info(void)
252 } 259 }
253 } 260 }
254 261
255 ret = info.probe_result = 1;
256 printk(KERN_DEBUG "QUIRK: Enable AMD PLL fix\n"); 262 printk(KERN_DEBUG "QUIRK: Enable AMD PLL fix\n");
257 263
258commit: 264commit:
@@ -263,7 +269,6 @@ commit:
263 269
264 /* Mark that we where here */ 270 /* Mark that we where here */
265 amd_chipset.probe_count++; 271 amd_chipset.probe_count++;
266 ret = amd_chipset.probe_result;
267 272
268 spin_unlock_irqrestore(&amd_lock, flags); 273 spin_unlock_irqrestore(&amd_lock, flags);
269 274
@@ -276,10 +281,7 @@ commit:
276 amd_chipset = info; 281 amd_chipset = info;
277 spin_unlock_irqrestore(&amd_lock, flags); 282 spin_unlock_irqrestore(&amd_lock, flags);
278 } 283 }
279
280 return ret;
281} 284}
282EXPORT_SYMBOL_GPL(usb_amd_find_chipset_info);
283 285
284int usb_hcd_amd_remote_wakeup_quirk(struct pci_dev *pdev) 286int usb_hcd_amd_remote_wakeup_quirk(struct pci_dev *pdev)
285{ 287{
@@ -315,6 +317,13 @@ bool usb_amd_prefetch_quirk(void)
315} 317}
316EXPORT_SYMBOL_GPL(usb_amd_prefetch_quirk); 318EXPORT_SYMBOL_GPL(usb_amd_prefetch_quirk);
317 319
320bool usb_amd_quirk_pll_check(void)
321{
322 usb_amd_find_chipset_info();
323 return amd_chipset.need_pll_quirk;
324}
325EXPORT_SYMBOL_GPL(usb_amd_quirk_pll_check);
326
318/* 327/*
319 * The hardware normally enables the A-link power management feature, which 328 * The hardware normally enables the A-link power management feature, which
320 * lets the system lower the power consumption in idle states. 329 * lets the system lower the power consumption in idle states.
@@ -520,7 +529,7 @@ void usb_amd_dev_put(void)
520 amd_chipset.nb_type = 0; 529 amd_chipset.nb_type = 0;
521 memset(&amd_chipset.sb_type, 0, sizeof(amd_chipset.sb_type)); 530 memset(&amd_chipset.sb_type, 0, sizeof(amd_chipset.sb_type));
522 amd_chipset.isoc_reqs = 0; 531 amd_chipset.isoc_reqs = 0;
523 amd_chipset.probe_result = 0; 532 amd_chipset.need_pll_quirk = 0;
524 533
525 spin_unlock_irqrestore(&amd_lock, flags); 534 spin_unlock_irqrestore(&amd_lock, flags);
526 535
diff --git a/drivers/usb/host/pci-quirks.h b/drivers/usb/host/pci-quirks.h
index 63c633077d9e..e729de21fad7 100644
--- a/drivers/usb/host/pci-quirks.h
+++ b/drivers/usb/host/pci-quirks.h
@@ -5,11 +5,11 @@
5#ifdef CONFIG_USB_PCI 5#ifdef CONFIG_USB_PCI
6void uhci_reset_hc(struct pci_dev *pdev, unsigned long base); 6void uhci_reset_hc(struct pci_dev *pdev, unsigned long base);
7int uhci_check_and_reset_hc(struct pci_dev *pdev, unsigned long base); 7int uhci_check_and_reset_hc(struct pci_dev *pdev, unsigned long base);
8int usb_amd_find_chipset_info(void);
9int usb_hcd_amd_remote_wakeup_quirk(struct pci_dev *pdev); 8int usb_hcd_amd_remote_wakeup_quirk(struct pci_dev *pdev);
10bool usb_amd_hang_symptom_quirk(void); 9bool usb_amd_hang_symptom_quirk(void);
11bool usb_amd_prefetch_quirk(void); 10bool usb_amd_prefetch_quirk(void);
12void usb_amd_dev_put(void); 11void usb_amd_dev_put(void);
12bool usb_amd_quirk_pll_check(void);
13void usb_amd_quirk_pll_disable(void); 13void usb_amd_quirk_pll_disable(void);
14void usb_amd_quirk_pll_enable(void); 14void usb_amd_quirk_pll_enable(void);
15void usb_asmedia_modifyflowcontrol(struct pci_dev *pdev); 15void usb_asmedia_modifyflowcontrol(struct pci_dev *pdev);
diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
index c2fe218e051f..1e0236e90687 100644
--- a/drivers/usb/host/xhci-pci.c
+++ b/drivers/usb/host/xhci-pci.c
@@ -130,7 +130,7 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
130 xhci->quirks |= XHCI_AMD_0x96_HOST; 130 xhci->quirks |= XHCI_AMD_0x96_HOST;
131 131
132 /* AMD PLL quirk */ 132 /* AMD PLL quirk */
133 if (pdev->vendor == PCI_VENDOR_ID_AMD && usb_amd_find_chipset_info()) 133 if (pdev->vendor == PCI_VENDOR_ID_AMD && usb_amd_quirk_pll_check())
134 xhci->quirks |= XHCI_AMD_PLL_FIX; 134 xhci->quirks |= XHCI_AMD_PLL_FIX;
135 135
136 if (pdev->vendor == PCI_VENDOR_ID_AMD && 136 if (pdev->vendor == PCI_VENDOR_ID_AMD &&
diff --git a/drivers/usb/host/xhci-rcar.c b/drivers/usb/host/xhci-rcar.c
index 671bce18782c..2b0ccd150209 100644
--- a/drivers/usb/host/xhci-rcar.c
+++ b/drivers/usb/host/xhci-rcar.c
@@ -104,7 +104,7 @@ static int xhci_rcar_is_gen2(struct device *dev)
104 return of_device_is_compatible(node, "renesas,xhci-r8a7790") || 104 return of_device_is_compatible(node, "renesas,xhci-r8a7790") ||
105 of_device_is_compatible(node, "renesas,xhci-r8a7791") || 105 of_device_is_compatible(node, "renesas,xhci-r8a7791") ||
106 of_device_is_compatible(node, "renesas,xhci-r8a7793") || 106 of_device_is_compatible(node, "renesas,xhci-r8a7793") ||
107 of_device_is_compatible(node, "renensas,rcar-gen2-xhci"); 107 of_device_is_compatible(node, "renesas,rcar-gen2-xhci");
108} 108}
109 109
110static int xhci_rcar_is_gen3(struct device *dev) 110static int xhci_rcar_is_gen3(struct device *dev)
@@ -238,10 +238,15 @@ int xhci_rcar_init_quirk(struct usb_hcd *hcd)
238 * pointers. So, this driver clears the AC64 bit of xhci->hcc_params 238 * pointers. So, this driver clears the AC64 bit of xhci->hcc_params
239 * to call dma_set_coherent_mask(dev, DMA_BIT_MASK(32)) in 239 * to call dma_set_coherent_mask(dev, DMA_BIT_MASK(32)) in
240 * xhci_gen_setup(). 240 * xhci_gen_setup().
241 *
242 * And, since the firmware/internal CPU control the USBSTS.STS_HALT
243 * and the process speed is down when the roothub port enters U3,
244 * long delay for the handshake of STS_HALT is neeed in xhci_suspend().
241 */ 245 */
242 if (xhci_rcar_is_gen2(hcd->self.controller) || 246 if (xhci_rcar_is_gen2(hcd->self.controller) ||
243 xhci_rcar_is_gen3(hcd->self.controller)) 247 xhci_rcar_is_gen3(hcd->self.controller)) {
244 xhci->quirks |= XHCI_NO_64BIT_SUPPORT; 248 xhci->quirks |= XHCI_NO_64BIT_SUPPORT | XHCI_SLOW_SUSPEND;
249 }
245 250
246 if (!xhci_rcar_wait_for_pll_active(hcd)) 251 if (!xhci_rcar_wait_for_pll_active(hcd))
247 return -ETIMEDOUT; 252 return -ETIMEDOUT;
diff --git a/drivers/usb/host/xhci-tegra.c b/drivers/usb/host/xhci-tegra.c
index dafc65911fc0..2ff7c911fbd0 100644
--- a/drivers/usb/host/xhci-tegra.c
+++ b/drivers/usb/host/xhci-tegra.c
@@ -1194,6 +1194,16 @@ static int tegra_xusb_probe(struct platform_device *pdev)
1194 1194
1195 tegra_xusb_config(tegra, regs); 1195 tegra_xusb_config(tegra, regs);
1196 1196
1197 /*
1198 * The XUSB Falcon microcontroller can only address 40 bits, so set
1199 * the DMA mask accordingly.
1200 */
1201 err = dma_set_mask_and_coherent(tegra->dev, DMA_BIT_MASK(40));
1202 if (err < 0) {
1203 dev_err(&pdev->dev, "failed to set DMA mask: %d\n", err);
1204 goto put_rpm;
1205 }
1206
1197 err = tegra_xusb_load_firmware(tegra); 1207 err = tegra_xusb_load_firmware(tegra);
1198 if (err < 0) { 1208 if (err < 0) {
1199 dev_err(&pdev->dev, "failed to load firmware: %d\n", err); 1209 dev_err(&pdev->dev, "failed to load firmware: %d\n", err);
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index 248cd7a8b163..03d1e552769b 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -3089,8 +3089,18 @@ static void xhci_endpoint_reset(struct usb_hcd *hcd,
3089 return; 3089 return;
3090 udev = (struct usb_device *) host_ep->hcpriv; 3090 udev = (struct usb_device *) host_ep->hcpriv;
3091 vdev = xhci->devs[udev->slot_id]; 3091 vdev = xhci->devs[udev->slot_id];
3092
3093 /*
3094 * vdev may be lost due to xHC restore error and re-initialization
3095 * during S3/S4 resume. A new vdev will be allocated later by
3096 * xhci_discover_or_reset_device()
3097 */
3098 if (!udev->slot_id || !vdev)
3099 return;
3092 ep_index = xhci_get_endpoint_index(&host_ep->desc); 3100 ep_index = xhci_get_endpoint_index(&host_ep->desc);
3093 ep = &vdev->eps[ep_index]; 3101 ep = &vdev->eps[ep_index];
3102 if (!ep)
3103 return;
3094 3104
3095 /* Bail out if toggle is already being cleared by a endpoint reset */ 3105 /* Bail out if toggle is already being cleared by a endpoint reset */
3096 if (ep->ep_state & EP_HARD_CLEAR_TOGGLE) { 3106 if (ep->ep_state & EP_HARD_CLEAR_TOGGLE) {
diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
index 7a264962a1a9..f5c41448d067 100644
--- a/drivers/usb/host/xhci.h
+++ b/drivers/usb/host/xhci.h
@@ -2175,7 +2175,8 @@ static inline bool xhci_urb_suitable_for_idt(struct urb *urb)
2175 if (!usb_endpoint_xfer_isoc(&urb->ep->desc) && usb_urb_dir_out(urb) && 2175 if (!usb_endpoint_xfer_isoc(&urb->ep->desc) && usb_urb_dir_out(urb) &&
2176 usb_endpoint_maxp(&urb->ep->desc) >= TRB_IDT_MAX_SIZE && 2176 usb_endpoint_maxp(&urb->ep->desc) >= TRB_IDT_MAX_SIZE &&
2177 urb->transfer_buffer_length <= TRB_IDT_MAX_SIZE && 2177 urb->transfer_buffer_length <= TRB_IDT_MAX_SIZE &&
2178 !(urb->transfer_flags & URB_NO_TRANSFER_DMA_MAP)) 2178 !(urb->transfer_flags & URB_NO_TRANSFER_DMA_MAP) &&
2179 !urb->num_sgs)
2179 return true; 2180 return true;
2180 2181
2181 return false; 2182 return false;
diff --git a/drivers/usb/misc/iowarrior.c b/drivers/usb/misc/iowarrior.c
index ba05dd80a020..f5bed9f29e56 100644
--- a/drivers/usb/misc/iowarrior.c
+++ b/drivers/usb/misc/iowarrior.c
@@ -866,19 +866,20 @@ static void iowarrior_disconnect(struct usb_interface *interface)
866 dev = usb_get_intfdata(interface); 866 dev = usb_get_intfdata(interface);
867 mutex_lock(&iowarrior_open_disc_lock); 867 mutex_lock(&iowarrior_open_disc_lock);
868 usb_set_intfdata(interface, NULL); 868 usb_set_intfdata(interface, NULL);
869 /* prevent device read, write and ioctl */
870 dev->present = 0;
869 871
870 minor = dev->minor; 872 minor = dev->minor;
873 mutex_unlock(&iowarrior_open_disc_lock);
874 /* give back our minor - this will call close() locks need to be dropped at this point*/
871 875
872 /* give back our minor */
873 usb_deregister_dev(interface, &iowarrior_class); 876 usb_deregister_dev(interface, &iowarrior_class);
874 877
875 mutex_lock(&dev->mutex); 878 mutex_lock(&dev->mutex);
876 879
877 /* prevent device read, write and ioctl */ 880 /* prevent device read, write and ioctl */
878 dev->present = 0;
879 881
880 mutex_unlock(&dev->mutex); 882 mutex_unlock(&dev->mutex);
881 mutex_unlock(&iowarrior_open_disc_lock);
882 883
883 if (dev->opened) { 884 if (dev->opened) {
884 /* There is a process that holds a filedescriptor to the device , 885 /* There is a process that holds a filedescriptor to the device ,
diff --git a/drivers/usb/misc/rio500.c b/drivers/usb/misc/rio500.c
index 27e9c78a791e..a32d61a79ab8 100644
--- a/drivers/usb/misc/rio500.c
+++ b/drivers/usb/misc/rio500.c
@@ -51,6 +51,7 @@ struct rio_usb_data {
51 char *obuf, *ibuf; /* transfer buffers */ 51 char *obuf, *ibuf; /* transfer buffers */
52 char bulk_in_ep, bulk_out_ep; /* Endpoint assignments */ 52 char bulk_in_ep, bulk_out_ep; /* Endpoint assignments */
53 wait_queue_head_t wait_q; /* for timeouts */ 53 wait_queue_head_t wait_q; /* for timeouts */
54 struct mutex lock; /* general race avoidance */
54}; 55};
55 56
56static DEFINE_MUTEX(rio500_mutex); 57static DEFINE_MUTEX(rio500_mutex);
@@ -62,8 +63,10 @@ static int open_rio(struct inode *inode, struct file *file)
62 63
63 /* against disconnect() */ 64 /* against disconnect() */
64 mutex_lock(&rio500_mutex); 65 mutex_lock(&rio500_mutex);
66 mutex_lock(&(rio->lock));
65 67
66 if (rio->isopen || !rio->present) { 68 if (rio->isopen || !rio->present) {
69 mutex_unlock(&(rio->lock));
67 mutex_unlock(&rio500_mutex); 70 mutex_unlock(&rio500_mutex);
68 return -EBUSY; 71 return -EBUSY;
69 } 72 }
@@ -71,6 +74,7 @@ static int open_rio(struct inode *inode, struct file *file)
71 74
72 init_waitqueue_head(&rio->wait_q); 75 init_waitqueue_head(&rio->wait_q);
73 76
77 mutex_unlock(&(rio->lock));
74 78
75 dev_info(&rio->rio_dev->dev, "Rio opened.\n"); 79 dev_info(&rio->rio_dev->dev, "Rio opened.\n");
76 mutex_unlock(&rio500_mutex); 80 mutex_unlock(&rio500_mutex);
@@ -84,6 +88,7 @@ static int close_rio(struct inode *inode, struct file *file)
84 88
85 /* against disconnect() */ 89 /* against disconnect() */
86 mutex_lock(&rio500_mutex); 90 mutex_lock(&rio500_mutex);
91 mutex_lock(&(rio->lock));
87 92
88 rio->isopen = 0; 93 rio->isopen = 0;
89 if (!rio->present) { 94 if (!rio->present) {
@@ -95,6 +100,7 @@ static int close_rio(struct inode *inode, struct file *file)
95 } else { 100 } else {
96 dev_info(&rio->rio_dev->dev, "Rio closed.\n"); 101 dev_info(&rio->rio_dev->dev, "Rio closed.\n");
97 } 102 }
103 mutex_unlock(&(rio->lock));
98 mutex_unlock(&rio500_mutex); 104 mutex_unlock(&rio500_mutex);
99 return 0; 105 return 0;
100} 106}
@@ -109,7 +115,7 @@ static long ioctl_rio(struct file *file, unsigned int cmd, unsigned long arg)
109 int retries; 115 int retries;
110 int retval=0; 116 int retval=0;
111 117
112 mutex_lock(&rio500_mutex); 118 mutex_lock(&(rio->lock));
113 /* Sanity check to make sure rio is connected, powered, etc */ 119 /* Sanity check to make sure rio is connected, powered, etc */
114 if (rio->present == 0 || rio->rio_dev == NULL) { 120 if (rio->present == 0 || rio->rio_dev == NULL) {
115 retval = -ENODEV; 121 retval = -ENODEV;
@@ -253,7 +259,7 @@ static long ioctl_rio(struct file *file, unsigned int cmd, unsigned long arg)
253 259
254 260
255err_out: 261err_out:
256 mutex_unlock(&rio500_mutex); 262 mutex_unlock(&(rio->lock));
257 return retval; 263 return retval;
258} 264}
259 265
@@ -273,12 +279,12 @@ write_rio(struct file *file, const char __user *buffer,
273 int errn = 0; 279 int errn = 0;
274 int intr; 280 int intr;
275 281
276 intr = mutex_lock_interruptible(&rio500_mutex); 282 intr = mutex_lock_interruptible(&(rio->lock));
277 if (intr) 283 if (intr)
278 return -EINTR; 284 return -EINTR;
279 /* Sanity check to make sure rio is connected, powered, etc */ 285 /* Sanity check to make sure rio is connected, powered, etc */
280 if (rio->present == 0 || rio->rio_dev == NULL) { 286 if (rio->present == 0 || rio->rio_dev == NULL) {
281 mutex_unlock(&rio500_mutex); 287 mutex_unlock(&(rio->lock));
282 return -ENODEV; 288 return -ENODEV;
283 } 289 }
284 290
@@ -301,7 +307,7 @@ write_rio(struct file *file, const char __user *buffer,
301 goto error; 307 goto error;
302 } 308 }
303 if (signal_pending(current)) { 309 if (signal_pending(current)) {
304 mutex_unlock(&rio500_mutex); 310 mutex_unlock(&(rio->lock));
305 return bytes_written ? bytes_written : -EINTR; 311 return bytes_written ? bytes_written : -EINTR;
306 } 312 }
307 313
@@ -339,12 +345,12 @@ write_rio(struct file *file, const char __user *buffer,
339 buffer += copy_size; 345 buffer += copy_size;
340 } while (count > 0); 346 } while (count > 0);
341 347
342 mutex_unlock(&rio500_mutex); 348 mutex_unlock(&(rio->lock));
343 349
344 return bytes_written ? bytes_written : -EIO; 350 return bytes_written ? bytes_written : -EIO;
345 351
346error: 352error:
347 mutex_unlock(&rio500_mutex); 353 mutex_unlock(&(rio->lock));
348 return errn; 354 return errn;
349} 355}
350 356
@@ -361,12 +367,12 @@ read_rio(struct file *file, char __user *buffer, size_t count, loff_t * ppos)
361 char *ibuf; 367 char *ibuf;
362 int intr; 368 int intr;
363 369
364 intr = mutex_lock_interruptible(&rio500_mutex); 370 intr = mutex_lock_interruptible(&(rio->lock));
365 if (intr) 371 if (intr)
366 return -EINTR; 372 return -EINTR;
367 /* Sanity check to make sure rio is connected, powered, etc */ 373 /* Sanity check to make sure rio is connected, powered, etc */
368 if (rio->present == 0 || rio->rio_dev == NULL) { 374 if (rio->present == 0 || rio->rio_dev == NULL) {
369 mutex_unlock(&rio500_mutex); 375 mutex_unlock(&(rio->lock));
370 return -ENODEV; 376 return -ENODEV;
371 } 377 }
372 378
@@ -377,11 +383,11 @@ read_rio(struct file *file, char __user *buffer, size_t count, loff_t * ppos)
377 383
378 while (count > 0) { 384 while (count > 0) {
379 if (signal_pending(current)) { 385 if (signal_pending(current)) {
380 mutex_unlock(&rio500_mutex); 386 mutex_unlock(&(rio->lock));
381 return read_count ? read_count : -EINTR; 387 return read_count ? read_count : -EINTR;
382 } 388 }
383 if (!rio->rio_dev) { 389 if (!rio->rio_dev) {
384 mutex_unlock(&rio500_mutex); 390 mutex_unlock(&(rio->lock));
385 return -ENODEV; 391 return -ENODEV;
386 } 392 }
387 this_read = (count >= IBUF_SIZE) ? IBUF_SIZE : count; 393 this_read = (count >= IBUF_SIZE) ? IBUF_SIZE : count;
@@ -399,7 +405,7 @@ read_rio(struct file *file, char __user *buffer, size_t count, loff_t * ppos)
399 count = this_read = partial; 405 count = this_read = partial;
400 } else if (result == -ETIMEDOUT || result == 15) { /* FIXME: 15 ??? */ 406 } else if (result == -ETIMEDOUT || result == 15) { /* FIXME: 15 ??? */
401 if (!maxretry--) { 407 if (!maxretry--) {
402 mutex_unlock(&rio500_mutex); 408 mutex_unlock(&(rio->lock));
403 dev_err(&rio->rio_dev->dev, 409 dev_err(&rio->rio_dev->dev,
404 "read_rio: maxretry timeout\n"); 410 "read_rio: maxretry timeout\n");
405 return -ETIME; 411 return -ETIME;
@@ -409,19 +415,19 @@ read_rio(struct file *file, char __user *buffer, size_t count, loff_t * ppos)
409 finish_wait(&rio->wait_q, &wait); 415 finish_wait(&rio->wait_q, &wait);
410 continue; 416 continue;
411 } else if (result != -EREMOTEIO) { 417 } else if (result != -EREMOTEIO) {
412 mutex_unlock(&rio500_mutex); 418 mutex_unlock(&(rio->lock));
413 dev_err(&rio->rio_dev->dev, 419 dev_err(&rio->rio_dev->dev,
414 "Read Whoops - result:%d partial:%u this_read:%u\n", 420 "Read Whoops - result:%d partial:%u this_read:%u\n",
415 result, partial, this_read); 421 result, partial, this_read);
416 return -EIO; 422 return -EIO;
417 } else { 423 } else {
418 mutex_unlock(&rio500_mutex); 424 mutex_unlock(&(rio->lock));
419 return (0); 425 return (0);
420 } 426 }
421 427
422 if (this_read) { 428 if (this_read) {
423 if (copy_to_user(buffer, ibuf, this_read)) { 429 if (copy_to_user(buffer, ibuf, this_read)) {
424 mutex_unlock(&rio500_mutex); 430 mutex_unlock(&(rio->lock));
425 return -EFAULT; 431 return -EFAULT;
426 } 432 }
427 count -= this_read; 433 count -= this_read;
@@ -429,7 +435,7 @@ read_rio(struct file *file, char __user *buffer, size_t count, loff_t * ppos)
429 buffer += this_read; 435 buffer += this_read;
430 } 436 }
431 } 437 }
432 mutex_unlock(&rio500_mutex); 438 mutex_unlock(&(rio->lock));
433 return read_count; 439 return read_count;
434} 440}
435 441
@@ -494,6 +500,8 @@ static int probe_rio(struct usb_interface *intf,
494 } 500 }
495 dev_dbg(&intf->dev, "ibuf address:%p\n", rio->ibuf); 501 dev_dbg(&intf->dev, "ibuf address:%p\n", rio->ibuf);
496 502
503 mutex_init(&(rio->lock));
504
497 usb_set_intfdata (intf, rio); 505 usb_set_intfdata (intf, rio);
498 rio->present = 1; 506 rio->present = 1;
499bail_out: 507bail_out:
@@ -511,10 +519,12 @@ static void disconnect_rio(struct usb_interface *intf)
511 if (rio) { 519 if (rio) {
512 usb_deregister_dev(intf, &usb_rio_class); 520 usb_deregister_dev(intf, &usb_rio_class);
513 521
522 mutex_lock(&(rio->lock));
514 if (rio->isopen) { 523 if (rio->isopen) {
515 rio->isopen = 0; 524 rio->isopen = 0;
516 /* better let it finish - the release will do whats needed */ 525 /* better let it finish - the release will do whats needed */
517 rio->rio_dev = NULL; 526 rio->rio_dev = NULL;
527 mutex_unlock(&(rio->lock));
518 mutex_unlock(&rio500_mutex); 528 mutex_unlock(&rio500_mutex);
519 return; 529 return;
520 } 530 }
@@ -524,6 +534,7 @@ static void disconnect_rio(struct usb_interface *intf)
524 dev_info(&intf->dev, "USB Rio disconnected.\n"); 534 dev_info(&intf->dev, "USB Rio disconnected.\n");
525 535
526 rio->present = 0; 536 rio->present = 0;
537 mutex_unlock(&(rio->lock));
527 } 538 }
528 mutex_unlock(&rio500_mutex); 539 mutex_unlock(&rio500_mutex);
529} 540}
diff --git a/drivers/usb/misc/usb251xb.c b/drivers/usb/misc/usb251xb.c
index 4d6ae3795a88..6ca9111d150a 100644
--- a/drivers/usb/misc/usb251xb.c
+++ b/drivers/usb/misc/usb251xb.c
@@ -375,7 +375,8 @@ out_err:
375 375
376#ifdef CONFIG_OF 376#ifdef CONFIG_OF
377static void usb251xb_get_ports_field(struct usb251xb *hub, 377static void usb251xb_get_ports_field(struct usb251xb *hub,
378 const char *prop_name, u8 port_cnt, u8 *fld) 378 const char *prop_name, u8 port_cnt,
379 bool ds_only, u8 *fld)
379{ 380{
380 struct device *dev = hub->dev; 381 struct device *dev = hub->dev;
381 struct property *prop; 382 struct property *prop;
@@ -383,7 +384,7 @@ static void usb251xb_get_ports_field(struct usb251xb *hub,
383 u32 port; 384 u32 port;
384 385
385 of_property_for_each_u32(dev->of_node, prop_name, prop, p, port) { 386 of_property_for_each_u32(dev->of_node, prop_name, prop, p, port) {
386 if ((port >= 1) && (port <= port_cnt)) 387 if ((port >= ds_only ? 1 : 0) && (port <= port_cnt))
387 *fld |= BIT(port); 388 *fld |= BIT(port);
388 else 389 else
389 dev_warn(dev, "port %u doesn't exist\n", port); 390 dev_warn(dev, "port %u doesn't exist\n", port);
@@ -501,15 +502,15 @@ static int usb251xb_get_ofdata(struct usb251xb *hub,
501 502
502 hub->non_rem_dev = USB251XB_DEF_NON_REMOVABLE_DEVICES; 503 hub->non_rem_dev = USB251XB_DEF_NON_REMOVABLE_DEVICES;
503 usb251xb_get_ports_field(hub, "non-removable-ports", data->port_cnt, 504 usb251xb_get_ports_field(hub, "non-removable-ports", data->port_cnt,
504 &hub->non_rem_dev); 505 true, &hub->non_rem_dev);
505 506
506 hub->port_disable_sp = USB251XB_DEF_PORT_DISABLE_SELF; 507 hub->port_disable_sp = USB251XB_DEF_PORT_DISABLE_SELF;
507 usb251xb_get_ports_field(hub, "sp-disabled-ports", data->port_cnt, 508 usb251xb_get_ports_field(hub, "sp-disabled-ports", data->port_cnt,
508 &hub->port_disable_sp); 509 true, &hub->port_disable_sp);
509 510
510 hub->port_disable_bp = USB251XB_DEF_PORT_DISABLE_BUS; 511 hub->port_disable_bp = USB251XB_DEF_PORT_DISABLE_BUS;
511 usb251xb_get_ports_field(hub, "bp-disabled-ports", data->port_cnt, 512 usb251xb_get_ports_field(hub, "bp-disabled-ports", data->port_cnt,
512 &hub->port_disable_bp); 513 true, &hub->port_disable_bp);
513 514
514 hub->max_power_sp = USB251XB_DEF_MAX_POWER_SELF; 515 hub->max_power_sp = USB251XB_DEF_MAX_POWER_SELF;
515 if (!of_property_read_u32(np, "sp-max-total-current-microamp", 516 if (!of_property_read_u32(np, "sp-max-total-current-microamp",
@@ -573,9 +574,7 @@ static int usb251xb_get_ofdata(struct usb251xb *hub,
573 */ 574 */
574 hub->port_swap = USB251XB_DEF_PORT_SWAP; 575 hub->port_swap = USB251XB_DEF_PORT_SWAP;
575 usb251xb_get_ports_field(hub, "swap-dx-lanes", data->port_cnt, 576 usb251xb_get_ports_field(hub, "swap-dx-lanes", data->port_cnt,
576 &hub->port_swap); 577 false, &hub->port_swap);
577 if (of_get_property(np, "swap-us-lanes", NULL))
578 hub->port_swap |= BIT(0);
579 578
580 /* The following parameters are currently not exposed to devicetree, but 579 /* The following parameters are currently not exposed to devicetree, but
581 * may be as soon as needed. 580 * may be as soon as needed.
diff --git a/drivers/usb/misc/yurex.c b/drivers/usb/misc/yurex.c
index 7b306aa22d25..6715a128e6c8 100644
--- a/drivers/usb/misc/yurex.c
+++ b/drivers/usb/misc/yurex.c
@@ -92,7 +92,6 @@ static void yurex_delete(struct kref *kref)
92 92
93 dev_dbg(&dev->interface->dev, "%s\n", __func__); 93 dev_dbg(&dev->interface->dev, "%s\n", __func__);
94 94
95 usb_put_dev(dev->udev);
96 if (dev->cntl_urb) { 95 if (dev->cntl_urb) {
97 usb_kill_urb(dev->cntl_urb); 96 usb_kill_urb(dev->cntl_urb);
98 kfree(dev->cntl_req); 97 kfree(dev->cntl_req);
@@ -108,6 +107,7 @@ static void yurex_delete(struct kref *kref)
108 dev->int_buffer, dev->urb->transfer_dma); 107 dev->int_buffer, dev->urb->transfer_dma);
109 usb_free_urb(dev->urb); 108 usb_free_urb(dev->urb);
110 } 109 }
110 usb_put_dev(dev->udev);
111 kfree(dev); 111 kfree(dev);
112} 112}
113 113
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index c1582fbd1150..38e920ac7f82 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -968,6 +968,11 @@ static const struct usb_device_id option_ids[] = {
968 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x7B) }, 968 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x7B) },
969 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x7C) }, 969 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x7C) },
970 970
971 /* Motorola devices */
972 { USB_DEVICE_AND_INTERFACE_INFO(0x22b8, 0x2a70, 0xff, 0xff, 0xff) }, /* mdm6600 */
973 { USB_DEVICE_AND_INTERFACE_INFO(0x22b8, 0x2e0a, 0xff, 0xff, 0xff) }, /* mdm9600 */
974 { USB_DEVICE_AND_INTERFACE_INFO(0x22b8, 0x4281, 0x0a, 0x00, 0xfc) }, /* mdm ram dl */
975 { USB_DEVICE_AND_INTERFACE_INFO(0x22b8, 0x900e, 0xff, 0xff, 0xff) }, /* mdm qc dl */
971 976
972 { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_V640) }, 977 { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_V640) },
973 { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_V620) }, 978 { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_V620) },
@@ -1549,6 +1554,7 @@ static const struct usb_device_id option_ids[] = {
1549 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1428, 0xff, 0xff, 0xff), /* Telewell TW-LTE 4G v2 */ 1554 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1428, 0xff, 0xff, 0xff), /* Telewell TW-LTE 4G v2 */
1550 .driver_info = RSVD(2) }, 1555 .driver_info = RSVD(2) },
1551 { USB_DEVICE_INTERFACE_CLASS(ZTE_VENDOR_ID, 0x1476, 0xff) }, /* GosunCn ZTE WeLink ME3630 (ECM/NCM mode) */ 1556 { USB_DEVICE_INTERFACE_CLASS(ZTE_VENDOR_ID, 0x1476, 0xff) }, /* GosunCn ZTE WeLink ME3630 (ECM/NCM mode) */
1557 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1481, 0xff, 0x00, 0x00) }, /* ZTE MF871A */
1552 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1533, 0xff, 0xff, 0xff) }, 1558 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1533, 0xff, 0xff, 0xff) },
1553 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1534, 0xff, 0xff, 0xff) }, 1559 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1534, 0xff, 0xff, 0xff) },
1554 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1535, 0xff, 0xff, 0xff) }, 1560 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1535, 0xff, 0xff, 0xff) },
@@ -1952,11 +1958,15 @@ static const struct usb_device_id option_ids[] = {
1952 .driver_info = RSVD(4) }, 1958 .driver_info = RSVD(4) },
1953 { USB_DEVICE_INTERFACE_CLASS(0x2001, 0x7e35, 0xff), /* D-Link DWM-222 */ 1959 { USB_DEVICE_INTERFACE_CLASS(0x2001, 0x7e35, 0xff), /* D-Link DWM-222 */
1954 .driver_info = RSVD(4) }, 1960 .driver_info = RSVD(4) },
1961 { USB_DEVICE_INTERFACE_CLASS(0x2001, 0x7e3d, 0xff), /* D-Link DWM-222 A2 */
1962 .driver_info = RSVD(4) },
1955 { USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3e01, 0xff, 0xff, 0xff) }, /* D-Link DWM-152/C1 */ 1963 { USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3e01, 0xff, 0xff, 0xff) }, /* D-Link DWM-152/C1 */
1956 { USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3e02, 0xff, 0xff, 0xff) }, /* D-Link DWM-156/C1 */ 1964 { USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3e02, 0xff, 0xff, 0xff) }, /* D-Link DWM-156/C1 */
1957 { USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x7e11, 0xff, 0xff, 0xff) }, /* D-Link DWM-156/A3 */ 1965 { USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x7e11, 0xff, 0xff, 0xff) }, /* D-Link DWM-156/A3 */
1958 { USB_DEVICE_INTERFACE_CLASS(0x2020, 0x2031, 0xff), /* Olicard 600 */ 1966 { USB_DEVICE_INTERFACE_CLASS(0x2020, 0x2031, 0xff), /* Olicard 600 */
1959 .driver_info = RSVD(4) }, 1967 .driver_info = RSVD(4) },
1968 { USB_DEVICE_INTERFACE_CLASS(0x2020, 0x2060, 0xff), /* BroadMobi BM818 */
1969 .driver_info = RSVD(4) },
1960 { USB_DEVICE_INTERFACE_CLASS(0x2020, 0x4000, 0xff) }, /* OLICARD300 - MT6225 */ 1970 { USB_DEVICE_INTERFACE_CLASS(0x2020, 0x4000, 0xff) }, /* OLICARD300 - MT6225 */
1961 { USB_DEVICE(INOVIA_VENDOR_ID, INOVIA_SEW858) }, 1971 { USB_DEVICE(INOVIA_VENDOR_ID, INOVIA_SEW858) },
1962 { USB_DEVICE(VIATELECOM_VENDOR_ID, VIATELECOM_PRODUCT_CDS7) }, 1972 { USB_DEVICE(VIATELECOM_VENDOR_ID, VIATELECOM_PRODUCT_CDS7) },
diff --git a/drivers/usb/storage/realtek_cr.c b/drivers/usb/storage/realtek_cr.c
index cc794e25a0b6..1d9ce9cbc831 100644
--- a/drivers/usb/storage/realtek_cr.c
+++ b/drivers/usb/storage/realtek_cr.c
@@ -38,7 +38,7 @@ MODULE_LICENSE("GPL");
38 38
39static int auto_delink_en = 1; 39static int auto_delink_en = 1;
40module_param(auto_delink_en, int, S_IRUGO | S_IWUSR); 40module_param(auto_delink_en, int, S_IRUGO | S_IWUSR);
41MODULE_PARM_DESC(auto_delink_en, "enable auto delink"); 41MODULE_PARM_DESC(auto_delink_en, "auto delink mode (0=firmware, 1=software [default])");
42 42
43#ifdef CONFIG_REALTEK_AUTOPM 43#ifdef CONFIG_REALTEK_AUTOPM
44static int ss_en = 1; 44static int ss_en = 1;
@@ -996,12 +996,15 @@ static int init_realtek_cr(struct us_data *us)
996 goto INIT_FAIL; 996 goto INIT_FAIL;
997 } 997 }
998 998
999 if (CHECK_FW_VER(chip, 0x5888) || CHECK_FW_VER(chip, 0x5889) || 999 if (CHECK_PID(chip, 0x0138) || CHECK_PID(chip, 0x0158) ||
1000 CHECK_FW_VER(chip, 0x5901)) 1000 CHECK_PID(chip, 0x0159)) {
1001 SET_AUTO_DELINK(chip); 1001 if (CHECK_FW_VER(chip, 0x5888) || CHECK_FW_VER(chip, 0x5889) ||
1002 if (STATUS_LEN(chip) == 16) { 1002 CHECK_FW_VER(chip, 0x5901))
1003 if (SUPPORT_AUTO_DELINK(chip))
1004 SET_AUTO_DELINK(chip); 1003 SET_AUTO_DELINK(chip);
1004 if (STATUS_LEN(chip) == 16) {
1005 if (SUPPORT_AUTO_DELINK(chip))
1006 SET_AUTO_DELINK(chip);
1007 }
1005 } 1008 }
1006#ifdef CONFIG_REALTEK_AUTOPM 1009#ifdef CONFIG_REALTEK_AUTOPM
1007 if (ss_en) 1010 if (ss_en)
diff --git a/drivers/usb/storage/scsiglue.c b/drivers/usb/storage/scsiglue.c
index 30790240aec6..05b80211290d 100644
--- a/drivers/usb/storage/scsiglue.c
+++ b/drivers/usb/storage/scsiglue.c
@@ -28,6 +28,8 @@
28 * status of a command. 28 * status of a command.
29 */ 29 */
30 30
31#include <linux/blkdev.h>
32#include <linux/dma-mapping.h>
31#include <linux/module.h> 33#include <linux/module.h>
32#include <linux/mutex.h> 34#include <linux/mutex.h>
33 35
@@ -99,6 +101,7 @@ static int slave_alloc (struct scsi_device *sdev)
99static int slave_configure(struct scsi_device *sdev) 101static int slave_configure(struct scsi_device *sdev)
100{ 102{
101 struct us_data *us = host_to_us(sdev->host); 103 struct us_data *us = host_to_us(sdev->host);
104 struct device *dev = us->pusb_dev->bus->sysdev;
102 105
103 /* 106 /*
104 * Many devices have trouble transferring more than 32KB at a time, 107 * Many devices have trouble transferring more than 32KB at a time,
@@ -129,6 +132,14 @@ static int slave_configure(struct scsi_device *sdev)
129 } 132 }
130 133
131 /* 134 /*
135 * The max_hw_sectors should be up to maximum size of a mapping for
136 * the device. Otherwise, a DMA API might fail on swiotlb environment.
137 */
138 blk_queue_max_hw_sectors(sdev->request_queue,
139 min_t(size_t, queue_max_hw_sectors(sdev->request_queue),
140 dma_max_mapping_size(dev) >> SECTOR_SHIFT));
141
142 /*
132 * Some USB host controllers can't do DMA; they have to use PIO. 143 * Some USB host controllers can't do DMA; they have to use PIO.
133 * They indicate this by setting their dma_mask to NULL. For 144 * They indicate this by setting their dma_mask to NULL. For
134 * such controllers we need to make sure the block layer sets 145 * such controllers we need to make sure the block layer sets
diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h
index ea0d27a94afe..1cd9b6305b06 100644
--- a/drivers/usb/storage/unusual_devs.h
+++ b/drivers/usb/storage/unusual_devs.h
@@ -2100,7 +2100,7 @@ UNUSUAL_DEV( 0x14cd, 0x6600, 0x0201, 0x0201,
2100 US_FL_IGNORE_RESIDUE ), 2100 US_FL_IGNORE_RESIDUE ),
2101 2101
2102/* Reported by Michael Büsch <m@bues.ch> */ 2102/* Reported by Michael Büsch <m@bues.ch> */
2103UNUSUAL_DEV( 0x152d, 0x0567, 0x0114, 0x0116, 2103UNUSUAL_DEV( 0x152d, 0x0567, 0x0114, 0x0117,
2104 "JMicron", 2104 "JMicron",
2105 "USB to ATA/ATAPI Bridge", 2105 "USB to ATA/ATAPI Bridge",
2106 USB_SC_DEVICE, USB_PR_DEVICE, NULL, 2106 USB_SC_DEVICE, USB_PR_DEVICE, NULL,
diff --git a/drivers/usb/typec/tcpm/tcpm.c b/drivers/usb/typec/tcpm/tcpm.c
index fba32d84e578..bcfdb55fd198 100644
--- a/drivers/usb/typec/tcpm/tcpm.c
+++ b/drivers/usb/typec/tcpm/tcpm.c
@@ -379,7 +379,8 @@ static enum tcpm_state tcpm_default_state(struct tcpm_port *port)
379 return SNK_UNATTACHED; 379 return SNK_UNATTACHED;
380 else if (port->try_role == TYPEC_SOURCE) 380 else if (port->try_role == TYPEC_SOURCE)
381 return SRC_UNATTACHED; 381 return SRC_UNATTACHED;
382 else if (port->tcpc->config->default_role == TYPEC_SINK) 382 else if (port->tcpc->config &&
383 port->tcpc->config->default_role == TYPEC_SINK)
383 return SNK_UNATTACHED; 384 return SNK_UNATTACHED;
384 /* Fall through to return SRC_UNATTACHED */ 385 /* Fall through to return SRC_UNATTACHED */
385 } else if (port->port_type == TYPEC_PORT_SNK) { 386 } else if (port->port_type == TYPEC_PORT_SNK) {
@@ -586,7 +587,20 @@ static void tcpm_debugfs_init(struct tcpm_port *port)
586 587
587static void tcpm_debugfs_exit(struct tcpm_port *port) 588static void tcpm_debugfs_exit(struct tcpm_port *port)
588{ 589{
590 int i;
591
592 mutex_lock(&port->logbuffer_lock);
593 for (i = 0; i < LOG_BUFFER_ENTRIES; i++) {
594 kfree(port->logbuffer[i]);
595 port->logbuffer[i] = NULL;
596 }
597 mutex_unlock(&port->logbuffer_lock);
598
589 debugfs_remove(port->dentry); 599 debugfs_remove(port->dentry);
600 if (list_empty(&rootdir->d_subdirs)) {
601 debugfs_remove(rootdir);
602 rootdir = NULL;
603 }
590} 604}
591 605
592#else 606#else
@@ -1095,7 +1109,8 @@ static int tcpm_pd_svdm(struct tcpm_port *port, const __le32 *payload, int cnt,
1095 break; 1109 break;
1096 case CMD_ATTENTION: 1110 case CMD_ATTENTION:
1097 /* Attention command does not have response */ 1111 /* Attention command does not have response */
1098 typec_altmode_attention(adev, p[1]); 1112 if (adev)
1113 typec_altmode_attention(adev, p[1]);
1099 return 0; 1114 return 0;
1100 default: 1115 default:
1101 break; 1116 break;
@@ -1147,20 +1162,26 @@ static int tcpm_pd_svdm(struct tcpm_port *port, const __le32 *payload, int cnt,
1147 } 1162 }
1148 break; 1163 break;
1149 case CMD_ENTER_MODE: 1164 case CMD_ENTER_MODE:
1150 typec_altmode_update_active(pdev, true); 1165 if (adev && pdev) {
1151 1166 typec_altmode_update_active(pdev, true);
1152 if (typec_altmode_vdm(adev, p[0], &p[1], cnt)) { 1167
1153 response[0] = VDO(adev->svid, 1, CMD_EXIT_MODE); 1168 if (typec_altmode_vdm(adev, p[0], &p[1], cnt)) {
1154 response[0] |= VDO_OPOS(adev->mode); 1169 response[0] = VDO(adev->svid, 1,
1155 return 1; 1170 CMD_EXIT_MODE);
1171 response[0] |= VDO_OPOS(adev->mode);
1172 return 1;
1173 }
1156 } 1174 }
1157 return 0; 1175 return 0;
1158 case CMD_EXIT_MODE: 1176 case CMD_EXIT_MODE:
1159 typec_altmode_update_active(pdev, false); 1177 if (adev && pdev) {
1178 typec_altmode_update_active(pdev, false);
1160 1179
1161 /* Back to USB Operation */ 1180 /* Back to USB Operation */
1162 WARN_ON(typec_altmode_notify(adev, TYPEC_STATE_USB, 1181 WARN_ON(typec_altmode_notify(adev,
1163 NULL)); 1182 TYPEC_STATE_USB,
1183 NULL));
1184 }
1164 break; 1185 break;
1165 default: 1186 default:
1166 break; 1187 break;
@@ -1170,8 +1191,10 @@ static int tcpm_pd_svdm(struct tcpm_port *port, const __le32 *payload, int cnt,
1170 switch (cmd) { 1191 switch (cmd) {
1171 case CMD_ENTER_MODE: 1192 case CMD_ENTER_MODE:
1172 /* Back to USB Operation */ 1193 /* Back to USB Operation */
1173 WARN_ON(typec_altmode_notify(adev, TYPEC_STATE_USB, 1194 if (adev)
1174 NULL)); 1195 WARN_ON(typec_altmode_notify(adev,
1196 TYPEC_STATE_USB,
1197 NULL));
1175 break; 1198 break;
1176 default: 1199 default:
1177 break; 1200 break;
@@ -1182,7 +1205,8 @@ static int tcpm_pd_svdm(struct tcpm_port *port, const __le32 *payload, int cnt,
1182 } 1205 }
1183 1206
1184 /* Informing the alternate mode drivers about everything */ 1207 /* Informing the alternate mode drivers about everything */
1185 typec_altmode_vdm(adev, p[0], &p[1], cnt); 1208 if (adev)
1209 typec_altmode_vdm(adev, p[0], &p[1], cnt);
1186 1210
1187 return rlen; 1211 return rlen;
1188} 1212}
@@ -1422,7 +1446,7 @@ static enum pdo_err tcpm_caps_err(struct tcpm_port *port, const u32 *pdo,
1422 else if ((pdo_min_voltage(pdo[i]) == 1446 else if ((pdo_min_voltage(pdo[i]) ==
1423 pdo_min_voltage(pdo[i - 1])) && 1447 pdo_min_voltage(pdo[i - 1])) &&
1424 (pdo_max_voltage(pdo[i]) == 1448 (pdo_max_voltage(pdo[i]) ==
1425 pdo_min_voltage(pdo[i - 1]))) 1449 pdo_max_voltage(pdo[i - 1])))
1426 return PDO_ERR_DUPE_PDO; 1450 return PDO_ERR_DUPE_PDO;
1427 break; 1451 break;
1428 /* 1452 /*
@@ -4114,7 +4138,7 @@ static int tcpm_try_role(const struct typec_capability *cap, int role)
4114 mutex_lock(&port->lock); 4138 mutex_lock(&port->lock);
4115 if (tcpc->try_role) 4139 if (tcpc->try_role)
4116 ret = tcpc->try_role(tcpc, role); 4140 ret = tcpc->try_role(tcpc, role);
4117 if (!ret && !tcpc->config->try_role_hw) 4141 if (!ret && (!tcpc->config || !tcpc->config->try_role_hw))
4118 port->try_role = role; 4142 port->try_role = role;
4119 port->try_src_count = 0; 4143 port->try_src_count = 0;
4120 port->try_snk_count = 0; 4144 port->try_snk_count = 0;
@@ -4701,7 +4725,7 @@ static int tcpm_copy_caps(struct tcpm_port *port,
4701 port->typec_caps.prefer_role = tcfg->default_role; 4725 port->typec_caps.prefer_role = tcfg->default_role;
4702 port->typec_caps.type = tcfg->type; 4726 port->typec_caps.type = tcfg->type;
4703 port->typec_caps.data = tcfg->data; 4727 port->typec_caps.data = tcfg->data;
4704 port->self_powered = port->tcpc->config->self_powered; 4728 port->self_powered = tcfg->self_powered;
4705 4729
4706 return 0; 4730 return 0;
4707} 4731}
diff --git a/drivers/usb/typec/ucsi/ucsi_ccg.c b/drivers/usb/typec/ucsi/ucsi_ccg.c
index f7a79a23ebed..8e9f8fba55af 100644
--- a/drivers/usb/typec/ucsi/ucsi_ccg.c
+++ b/drivers/usb/typec/ucsi/ucsi_ccg.c
@@ -1018,7 +1018,7 @@ release_fw:
1018 ******************************************************************************/ 1018 ******************************************************************************/
1019static int ccg_fw_update(struct ucsi_ccg *uc, enum enum_flash_mode flash_mode) 1019static int ccg_fw_update(struct ucsi_ccg *uc, enum enum_flash_mode flash_mode)
1020{ 1020{
1021 int err; 1021 int err = 0;
1022 1022
1023 while (flash_mode != FLASH_NOT_NEEDED) { 1023 while (flash_mode != FLASH_NOT_NEEDED) {
1024 err = do_flash(uc, flash_mode); 1024 err = do_flash(uc, flash_mode);
diff --git a/drivers/vhost/vhost.h b/drivers/vhost/vhost.h
index 819296332913..42a8c2a13ab1 100644
--- a/drivers/vhost/vhost.h
+++ b/drivers/vhost/vhost.h
@@ -96,7 +96,7 @@ struct vhost_uaddr {
96}; 96};
97 97
98#if defined(CONFIG_MMU_NOTIFIER) && ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE == 0 98#if defined(CONFIG_MMU_NOTIFIER) && ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE == 0
99#define VHOST_ARCH_CAN_ACCEL_UACCESS 1 99#define VHOST_ARCH_CAN_ACCEL_UACCESS 0
100#else 100#else
101#define VHOST_ARCH_CAN_ACCEL_UACCESS 0 101#define VHOST_ARCH_CAN_ACCEL_UACCESS 0
102#endif 102#endif
diff --git a/drivers/video/fbdev/acornfb.c b/drivers/video/fbdev/acornfb.c
index 92f23e3bc27a..7cacae5a8797 100644
--- a/drivers/video/fbdev/acornfb.c
+++ b/drivers/video/fbdev/acornfb.c
@@ -858,6 +858,7 @@ static void acornfb_parse_dram(char *opt)
858 case 'M': 858 case 'M':
859 case 'm': 859 case 'm':
860 size *= 1024; 860 size *= 1024;
861 /* Fall through */
861 case 'K': 862 case 'K':
862 case 'k': 863 case 'k':
863 size *= 1024; 864 size *= 1024;
diff --git a/drivers/video/fbdev/omap/omapfb_main.c b/drivers/video/fbdev/omap/omapfb_main.c
index 90eca64e3144..702cca59bda1 100644
--- a/drivers/video/fbdev/omap/omapfb_main.c
+++ b/drivers/video/fbdev/omap/omapfb_main.c
@@ -447,6 +447,7 @@ static int set_color_mode(struct omapfb_plane_struct *plane,
447 return 0; 447 return 0;
448 case 12: 448 case 12:
449 var->bits_per_pixel = 16; 449 var->bits_per_pixel = 16;
450 /* fall through */
450 case 16: 451 case 16:
451 if (plane->fbdev->panel->bpp == 12) 452 if (plane->fbdev->panel->bpp == 12)
452 plane->color_mode = OMAPFB_COLOR_RGB444; 453 plane->color_mode = OMAPFB_COLOR_RGB444;
@@ -1534,20 +1535,27 @@ static void omapfb_free_resources(struct omapfb_device *fbdev, int state)
1534 case OMAPFB_ACTIVE: 1535 case OMAPFB_ACTIVE:
1535 for (i = 0; i < fbdev->mem_desc.region_cnt; i++) 1536 for (i = 0; i < fbdev->mem_desc.region_cnt; i++)
1536 unregister_framebuffer(fbdev->fb_info[i]); 1537 unregister_framebuffer(fbdev->fb_info[i]);
1538 /* fall through */
1537 case 7: 1539 case 7:
1538 omapfb_unregister_sysfs(fbdev); 1540 omapfb_unregister_sysfs(fbdev);
1541 /* fall through */
1539 case 6: 1542 case 6:
1540 if (fbdev->panel->disable) 1543 if (fbdev->panel->disable)
1541 fbdev->panel->disable(fbdev->panel); 1544 fbdev->panel->disable(fbdev->panel);
1545 /* fall through */
1542 case 5: 1546 case 5:
1543 omapfb_set_update_mode(fbdev, OMAPFB_UPDATE_DISABLED); 1547 omapfb_set_update_mode(fbdev, OMAPFB_UPDATE_DISABLED);
1548 /* fall through */
1544 case 4: 1549 case 4:
1545 planes_cleanup(fbdev); 1550 planes_cleanup(fbdev);
1551 /* fall through */
1546 case 3: 1552 case 3:
1547 ctrl_cleanup(fbdev); 1553 ctrl_cleanup(fbdev);
1554 /* fall through */
1548 case 2: 1555 case 2:
1549 if (fbdev->panel->cleanup) 1556 if (fbdev->panel->cleanup)
1550 fbdev->panel->cleanup(fbdev->panel); 1557 fbdev->panel->cleanup(fbdev->panel);
1558 /* fall through */
1551 case 1: 1559 case 1:
1552 dev_set_drvdata(fbdev->dev, NULL); 1560 dev_set_drvdata(fbdev->dev, NULL);
1553 kfree(fbdev); 1561 kfree(fbdev);
diff --git a/drivers/watchdog/ar7_wdt.c b/drivers/watchdog/ar7_wdt.c
index b9b2d06b3879..668a1c704f28 100644
--- a/drivers/watchdog/ar7_wdt.c
+++ b/drivers/watchdog/ar7_wdt.c
@@ -235,6 +235,7 @@ static long ar7_wdt_ioctl(struct file *file,
235 ar7_wdt_update_margin(new_margin); 235 ar7_wdt_update_margin(new_margin);
236 ar7_wdt_kick(1); 236 ar7_wdt_kick(1);
237 spin_unlock(&wdt_lock); 237 spin_unlock(&wdt_lock);
238 /* Fall through */
238 239
239 case WDIOC_GETTIMEOUT: 240 case WDIOC_GETTIMEOUT:
240 if (put_user(margin, (int *)arg)) 241 if (put_user(margin, (int *)arg))
diff --git a/drivers/watchdog/pcwd.c b/drivers/watchdog/pcwd.c
index 1b2cf5b95a89..c3c93e00b320 100644
--- a/drivers/watchdog/pcwd.c
+++ b/drivers/watchdog/pcwd.c
@@ -651,7 +651,7 @@ static long pcwd_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
651 return -EINVAL; 651 return -EINVAL;
652 652
653 pcwd_keepalive(); 653 pcwd_keepalive();
654 /* Fall */ 654 /* Fall through */
655 655
656 case WDIOC_GETTIMEOUT: 656 case WDIOC_GETTIMEOUT:
657 return put_user(heartbeat, argp); 657 return put_user(heartbeat, argp);
diff --git a/drivers/watchdog/riowd.c b/drivers/watchdog/riowd.c
index 41a2a11535a6..b35f7be20c00 100644
--- a/drivers/watchdog/riowd.c
+++ b/drivers/watchdog/riowd.c
@@ -134,7 +134,7 @@ static long riowd_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
134 return -EINVAL; 134 return -EINVAL;
135 riowd_timeout = (new_margin + 59) / 60; 135 riowd_timeout = (new_margin + 59) / 60;
136 riowd_writereg(p, riowd_timeout, WDTO_INDEX); 136 riowd_writereg(p, riowd_timeout, WDTO_INDEX);
137 /* Fall */ 137 /* Fall through */
138 138
139 case WDIOC_GETTIMEOUT: 139 case WDIOC_GETTIMEOUT:
140 return put_user(riowd_timeout * 60, (int __user *)argp); 140 return put_user(riowd_timeout * 60, (int __user *)argp);
diff --git a/drivers/watchdog/sb_wdog.c b/drivers/watchdog/sb_wdog.c
index 5a6ced7a7e8f..202fc8d8ca5f 100644
--- a/drivers/watchdog/sb_wdog.c
+++ b/drivers/watchdog/sb_wdog.c
@@ -202,6 +202,7 @@ static long sbwdog_ioctl(struct file *file, unsigned int cmd,
202 timeout = time; 202 timeout = time;
203 sbwdog_set(user_dog, timeout); 203 sbwdog_set(user_dog, timeout);
204 sbwdog_pet(user_dog); 204 sbwdog_pet(user_dog);
205 /* Fall through */
205 206
206 case WDIOC_GETTIMEOUT: 207 case WDIOC_GETTIMEOUT:
207 /* 208 /*
diff --git a/drivers/watchdog/scx200_wdt.c b/drivers/watchdog/scx200_wdt.c
index efd7996694de..46268309ee9b 100644
--- a/drivers/watchdog/scx200_wdt.c
+++ b/drivers/watchdog/scx200_wdt.c
@@ -186,6 +186,7 @@ static long scx200_wdt_ioctl(struct file *file, unsigned int cmd,
186 margin = new_margin; 186 margin = new_margin;
187 scx200_wdt_update_margin(); 187 scx200_wdt_update_margin();
188 scx200_wdt_ping(); 188 scx200_wdt_ping();
189 /* Fall through */
189 case WDIOC_GETTIMEOUT: 190 case WDIOC_GETTIMEOUT:
190 if (put_user(margin, p)) 191 if (put_user(margin, p))
191 return -EFAULT; 192 return -EFAULT;
diff --git a/drivers/watchdog/wdt.c b/drivers/watchdog/wdt.c
index 0650100fad00..7d278b37e083 100644
--- a/drivers/watchdog/wdt.c
+++ b/drivers/watchdog/wdt.c
@@ -389,7 +389,7 @@ static long wdt_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
389 if (wdt_set_heartbeat(new_heartbeat)) 389 if (wdt_set_heartbeat(new_heartbeat))
390 return -EINVAL; 390 return -EINVAL;
391 wdt_ping(); 391 wdt_ping();
392 /* Fall */ 392 /* Fall through */
393 case WDIOC_GETTIMEOUT: 393 case WDIOC_GETTIMEOUT:
394 return put_user(heartbeat, p); 394 return put_user(heartbeat, p);
395 default: 395 default:
diff --git a/drivers/watchdog/wdt285.c b/drivers/watchdog/wdt285.c
index 4eacfb1ce1ac..eb729d704836 100644
--- a/drivers/watchdog/wdt285.c
+++ b/drivers/watchdog/wdt285.c
@@ -168,7 +168,7 @@ static long watchdog_ioctl(struct file *file, unsigned int cmd,
168 soft_margin = new_margin; 168 soft_margin = new_margin;
169 reload = soft_margin * (mem_fclk_21285 / 256); 169 reload = soft_margin * (mem_fclk_21285 / 256);
170 watchdog_ping(); 170 watchdog_ping();
171 /* Fall */ 171 /* Fall through */
172 case WDIOC_GETTIMEOUT: 172 case WDIOC_GETTIMEOUT:
173 ret = put_user(soft_margin, int_arg); 173 ret = put_user(soft_margin, int_arg);
174 break; 174 break;
diff --git a/drivers/watchdog/wdt977.c b/drivers/watchdog/wdt977.c
index 567005d7598e..5c52c73e1839 100644
--- a/drivers/watchdog/wdt977.c
+++ b/drivers/watchdog/wdt977.c
@@ -398,7 +398,7 @@ static long wdt977_ioctl(struct file *file, unsigned int cmd,
398 return -EINVAL; 398 return -EINVAL;
399 399
400 wdt977_keepalive(); 400 wdt977_keepalive();
401 /* Fall */ 401 /* Fall through */
402 402
403 case WDIOC_GETTIMEOUT: 403 case WDIOC_GETTIMEOUT:
404 return put_user(timeout, uarg.i); 404 return put_user(timeout, uarg.i);
diff --git a/drivers/xen/gntdev.c b/drivers/xen/gntdev.c
index 4c339c7e66e5..a446a7221e13 100644
--- a/drivers/xen/gntdev.c
+++ b/drivers/xen/gntdev.c
@@ -1143,7 +1143,7 @@ static int gntdev_mmap(struct file *flip, struct vm_area_struct *vma)
1143 goto out_put_map; 1143 goto out_put_map;
1144 1144
1145 if (!use_ptemod) { 1145 if (!use_ptemod) {
1146 err = vm_map_pages(vma, map->pages, map->count); 1146 err = vm_map_pages_zero(vma, map->pages, map->count);
1147 if (err) 1147 if (err)
1148 goto out_put_map; 1148 goto out_put_map;
1149 } else { 1149 } else {
diff --git a/drivers/xen/privcmd.c b/drivers/xen/privcmd.c
index 2f5ce7230a43..c6070e70dd73 100644
--- a/drivers/xen/privcmd.c
+++ b/drivers/xen/privcmd.c
@@ -724,25 +724,6 @@ static long privcmd_ioctl_restrict(struct file *file, void __user *udata)
724 return 0; 724 return 0;
725} 725}
726 726
727struct remap_pfn {
728 struct mm_struct *mm;
729 struct page **pages;
730 pgprot_t prot;
731 unsigned long i;
732};
733
734static int remap_pfn_fn(pte_t *ptep, unsigned long addr, void *data)
735{
736 struct remap_pfn *r = data;
737 struct page *page = r->pages[r->i];
738 pte_t pte = pte_mkspecial(pfn_pte(page_to_pfn(page), r->prot));
739
740 set_pte_at(r->mm, addr, ptep, pte);
741 r->i++;
742
743 return 0;
744}
745
746static long privcmd_ioctl_mmap_resource(struct file *file, void __user *udata) 727static long privcmd_ioctl_mmap_resource(struct file *file, void __user *udata)
747{ 728{
748 struct privcmd_data *data = file->private_data; 729 struct privcmd_data *data = file->private_data;
@@ -774,7 +755,8 @@ static long privcmd_ioctl_mmap_resource(struct file *file, void __user *udata)
774 goto out; 755 goto out;
775 } 756 }
776 757
777 if (xen_feature(XENFEAT_auto_translated_physmap)) { 758 if (IS_ENABLED(CONFIG_XEN_AUTO_XLATE) &&
759 xen_feature(XENFEAT_auto_translated_physmap)) {
778 unsigned int nr = DIV_ROUND_UP(kdata.num, XEN_PFN_PER_PAGE); 760 unsigned int nr = DIV_ROUND_UP(kdata.num, XEN_PFN_PER_PAGE);
779 struct page **pages; 761 struct page **pages;
780 unsigned int i; 762 unsigned int i;
@@ -808,16 +790,9 @@ static long privcmd_ioctl_mmap_resource(struct file *file, void __user *udata)
808 if (rc) 790 if (rc)
809 goto out; 791 goto out;
810 792
811 if (xen_feature(XENFEAT_auto_translated_physmap)) { 793 if (IS_ENABLED(CONFIG_XEN_AUTO_XLATE) &&
812 struct remap_pfn r = { 794 xen_feature(XENFEAT_auto_translated_physmap)) {
813 .mm = vma->vm_mm, 795 rc = xen_remap_vma_range(vma, kdata.addr, kdata.num << PAGE_SHIFT);
814 .pages = vma->vm_private_data,
815 .prot = vma->vm_page_prot,
816 };
817
818 rc = apply_to_page_range(r.mm, kdata.addr,
819 kdata.num << PAGE_SHIFT,
820 remap_pfn_fn, &r);
821 } else { 796 } else {
822 unsigned int domid = 797 unsigned int domid =
823 (xdata.flags & XENMEM_rsrc_acq_caller_owned) ? 798 (xdata.flags & XENMEM_rsrc_acq_caller_owned) ?
diff --git a/drivers/xen/swiotlb-xen.c b/drivers/xen/swiotlb-xen.c
index cfbe46785a3b..ae1df496bf38 100644
--- a/drivers/xen/swiotlb-xen.c
+++ b/drivers/xen/swiotlb-xen.c
@@ -83,34 +83,18 @@ static inline dma_addr_t xen_virt_to_bus(void *address)
83 return xen_phys_to_bus(virt_to_phys(address)); 83 return xen_phys_to_bus(virt_to_phys(address));
84} 84}
85 85
86static int check_pages_physically_contiguous(unsigned long xen_pfn, 86static inline int range_straddles_page_boundary(phys_addr_t p, size_t size)
87 unsigned int offset,
88 size_t length)
89{ 87{
90 unsigned long next_bfn; 88 unsigned long next_bfn, xen_pfn = XEN_PFN_DOWN(p);
91 int i; 89 unsigned int i, nr_pages = XEN_PFN_UP(xen_offset_in_page(p) + size);
92 int nr_pages;
93 90
94 next_bfn = pfn_to_bfn(xen_pfn); 91 next_bfn = pfn_to_bfn(xen_pfn);
95 nr_pages = (offset + length + XEN_PAGE_SIZE-1) >> XEN_PAGE_SHIFT;
96 92
97 for (i = 1; i < nr_pages; i++) { 93 for (i = 1; i < nr_pages; i++)
98 if (pfn_to_bfn(++xen_pfn) != ++next_bfn) 94 if (pfn_to_bfn(++xen_pfn) != ++next_bfn)
99 return 0; 95 return 1;
100 }
101 return 1;
102}
103 96
104static inline int range_straddles_page_boundary(phys_addr_t p, size_t size) 97 return 0;
105{
106 unsigned long xen_pfn = XEN_PFN_DOWN(p);
107 unsigned int offset = p & ~XEN_PAGE_MASK;
108
109 if (offset + size <= XEN_PAGE_SIZE)
110 return 0;
111 if (check_pages_physically_contiguous(xen_pfn, offset, size))
112 return 0;
113 return 1;
114} 98}
115 99
116static int is_xen_swiotlb_buffer(dma_addr_t dma_addr) 100static int is_xen_swiotlb_buffer(dma_addr_t dma_addr)
@@ -338,6 +322,7 @@ xen_swiotlb_alloc_coherent(struct device *hwdev, size_t size,
338 xen_free_coherent_pages(hwdev, size, ret, (dma_addr_t)phys, attrs); 322 xen_free_coherent_pages(hwdev, size, ret, (dma_addr_t)phys, attrs);
339 return NULL; 323 return NULL;
340 } 324 }
325 SetPageXenRemapped(virt_to_page(ret));
341 } 326 }
342 memset(ret, 0, size); 327 memset(ret, 0, size);
343 return ret; 328 return ret;
@@ -361,8 +346,9 @@ xen_swiotlb_free_coherent(struct device *hwdev, size_t size, void *vaddr,
361 /* Convert the size to actually allocated. */ 346 /* Convert the size to actually allocated. */
362 size = 1UL << (order + XEN_PAGE_SHIFT); 347 size = 1UL << (order + XEN_PAGE_SHIFT);
363 348
364 if (((dev_addr + size - 1 <= dma_mask)) || 349 if (!WARN_ON((dev_addr + size - 1 > dma_mask) ||
365 range_straddles_page_boundary(phys, size)) 350 range_straddles_page_boundary(phys, size)) &&
351 TestClearPageXenRemapped(virt_to_page(vaddr)))
366 xen_destroy_contiguous_region(phys, order); 352 xen_destroy_contiguous_region(phys, order);
367 353
368 xen_free_coherent_pages(hwdev, size, vaddr, (dma_addr_t)phys, attrs); 354 xen_free_coherent_pages(hwdev, size, vaddr, (dma_addr_t)phys, attrs);
diff --git a/drivers/xen/xen-pciback/conf_space_capability.c b/drivers/xen/xen-pciback/conf_space_capability.c
index 73427d8e0116..e5694133ebe5 100644
--- a/drivers/xen/xen-pciback/conf_space_capability.c
+++ b/drivers/xen/xen-pciback/conf_space_capability.c
@@ -116,13 +116,12 @@ static int pm_ctrl_write(struct pci_dev *dev, int offset, u16 new_value,
116{ 116{
117 int err; 117 int err;
118 u16 old_value; 118 u16 old_value;
119 pci_power_t new_state, old_state; 119 pci_power_t new_state;
120 120
121 err = pci_read_config_word(dev, offset, &old_value); 121 err = pci_read_config_word(dev, offset, &old_value);
122 if (err) 122 if (err)
123 goto out; 123 goto out;
124 124
125 old_state = (pci_power_t)(old_value & PCI_PM_CTRL_STATE_MASK);
126 new_state = (pci_power_t)(new_value & PCI_PM_CTRL_STATE_MASK); 125 new_state = (pci_power_t)(new_value & PCI_PM_CTRL_STATE_MASK);
127 126
128 new_value &= PM_OK_BITS; 127 new_value &= PM_OK_BITS;
diff --git a/drivers/xen/xlate_mmu.c b/drivers/xen/xlate_mmu.c
index ba883a80b3c0..7b1077f0abcb 100644
--- a/drivers/xen/xlate_mmu.c
+++ b/drivers/xen/xlate_mmu.c
@@ -262,3 +262,35 @@ int __init xen_xlate_map_ballooned_pages(xen_pfn_t **gfns, void **virt,
262 return 0; 262 return 0;
263} 263}
264EXPORT_SYMBOL_GPL(xen_xlate_map_ballooned_pages); 264EXPORT_SYMBOL_GPL(xen_xlate_map_ballooned_pages);
265
266struct remap_pfn {
267 struct mm_struct *mm;
268 struct page **pages;
269 pgprot_t prot;
270 unsigned long i;
271};
272
273static int remap_pfn_fn(pte_t *ptep, unsigned long addr, void *data)
274{
275 struct remap_pfn *r = data;
276 struct page *page = r->pages[r->i];
277 pte_t pte = pte_mkspecial(pfn_pte(page_to_pfn(page), r->prot));
278
279 set_pte_at(r->mm, addr, ptep, pte);
280 r->i++;
281
282 return 0;
283}
284
285/* Used by the privcmd module, but has to be built-in on ARM */
286int xen_remap_vma_range(struct vm_area_struct *vma, unsigned long addr, unsigned long len)
287{
288 struct remap_pfn r = {
289 .mm = vma->vm_mm,
290 .pages = vma->vm_private_data,
291 .prot = vma->vm_page_prot,
292 };
293
294 return apply_to_page_range(vma->vm_mm, addr, len, remap_pfn_fn, &r);
295}
296EXPORT_SYMBOL_GPL(xen_remap_vma_range);
diff --git a/fs/afs/cell.c b/fs/afs/cell.c
index a2a87117d262..fd5133e26a38 100644
--- a/fs/afs/cell.c
+++ b/fs/afs/cell.c
@@ -74,6 +74,7 @@ struct afs_cell *afs_lookup_cell_rcu(struct afs_net *net,
74 cell = rcu_dereference_raw(net->ws_cell); 74 cell = rcu_dereference_raw(net->ws_cell);
75 if (cell) { 75 if (cell) {
76 afs_get_cell(cell); 76 afs_get_cell(cell);
77 ret = 0;
77 break; 78 break;
78 } 79 }
79 ret = -EDESTADDRREQ; 80 ret = -EDESTADDRREQ;
@@ -108,6 +109,9 @@ struct afs_cell *afs_lookup_cell_rcu(struct afs_net *net,
108 109
109 done_seqretry(&net->cells_lock, seq); 110 done_seqretry(&net->cells_lock, seq);
110 111
112 if (ret != 0 && cell)
113 afs_put_cell(net, cell);
114
111 return ret == 0 ? cell : ERR_PTR(ret); 115 return ret == 0 ? cell : ERR_PTR(ret);
112} 116}
113 117
diff --git a/fs/afs/cmservice.c b/fs/afs/cmservice.c
index 4f1b6f466ff5..b86195e4dc6c 100644
--- a/fs/afs/cmservice.c
+++ b/fs/afs/cmservice.c
@@ -505,18 +505,14 @@ static void SRXAFSCB_ProbeUuid(struct work_struct *work)
505 struct afs_call *call = container_of(work, struct afs_call, work); 505 struct afs_call *call = container_of(work, struct afs_call, work);
506 struct afs_uuid *r = call->request; 506 struct afs_uuid *r = call->request;
507 507
508 struct {
509 __be32 match;
510 } reply;
511
512 _enter(""); 508 _enter("");
513 509
514 if (memcmp(r, &call->net->uuid, sizeof(call->net->uuid)) == 0) 510 if (memcmp(r, &call->net->uuid, sizeof(call->net->uuid)) == 0)
515 reply.match = htonl(0); 511 afs_send_empty_reply(call);
516 else 512 else
517 reply.match = htonl(1); 513 rxrpc_kernel_abort_call(call->net->socket, call->rxcall,
514 1, 1, "K-1");
518 515
519 afs_send_simple_reply(call, &reply, sizeof(reply));
520 afs_put_call(call); 516 afs_put_call(call);
521 _leave(""); 517 _leave("");
522} 518}
diff --git a/fs/afs/dir.c b/fs/afs/dir.c
index e640d67274be..139b4e3cc946 100644
--- a/fs/afs/dir.c
+++ b/fs/afs/dir.c
@@ -440,7 +440,7 @@ static int afs_dir_iterate_block(struct afs_vnode *dvnode,
440 * iterate through the data blob that lists the contents of an AFS directory 440 * iterate through the data blob that lists the contents of an AFS directory
441 */ 441 */
442static int afs_dir_iterate(struct inode *dir, struct dir_context *ctx, 442static int afs_dir_iterate(struct inode *dir, struct dir_context *ctx,
443 struct key *key) 443 struct key *key, afs_dataversion_t *_dir_version)
444{ 444{
445 struct afs_vnode *dvnode = AFS_FS_I(dir); 445 struct afs_vnode *dvnode = AFS_FS_I(dir);
446 struct afs_xdr_dir_page *dbuf; 446 struct afs_xdr_dir_page *dbuf;
@@ -460,6 +460,7 @@ static int afs_dir_iterate(struct inode *dir, struct dir_context *ctx,
460 req = afs_read_dir(dvnode, key); 460 req = afs_read_dir(dvnode, key);
461 if (IS_ERR(req)) 461 if (IS_ERR(req))
462 return PTR_ERR(req); 462 return PTR_ERR(req);
463 *_dir_version = req->data_version;
463 464
464 /* round the file position up to the next entry boundary */ 465 /* round the file position up to the next entry boundary */
465 ctx->pos += sizeof(union afs_xdr_dirent) - 1; 466 ctx->pos += sizeof(union afs_xdr_dirent) - 1;
@@ -514,7 +515,10 @@ out:
514 */ 515 */
515static int afs_readdir(struct file *file, struct dir_context *ctx) 516static int afs_readdir(struct file *file, struct dir_context *ctx)
516{ 517{
517 return afs_dir_iterate(file_inode(file), ctx, afs_file_key(file)); 518 afs_dataversion_t dir_version;
519
520 return afs_dir_iterate(file_inode(file), ctx, afs_file_key(file),
521 &dir_version);
518} 522}
519 523
520/* 524/*
@@ -555,7 +559,8 @@ static int afs_lookup_one_filldir(struct dir_context *ctx, const char *name,
555 * - just returns the FID the dentry name maps to if found 559 * - just returns the FID the dentry name maps to if found
556 */ 560 */
557static int afs_do_lookup_one(struct inode *dir, struct dentry *dentry, 561static int afs_do_lookup_one(struct inode *dir, struct dentry *dentry,
558 struct afs_fid *fid, struct key *key) 562 struct afs_fid *fid, struct key *key,
563 afs_dataversion_t *_dir_version)
559{ 564{
560 struct afs_super_info *as = dir->i_sb->s_fs_info; 565 struct afs_super_info *as = dir->i_sb->s_fs_info;
561 struct afs_lookup_one_cookie cookie = { 566 struct afs_lookup_one_cookie cookie = {
@@ -568,7 +573,7 @@ static int afs_do_lookup_one(struct inode *dir, struct dentry *dentry,
568 _enter("{%lu},%p{%pd},", dir->i_ino, dentry, dentry); 573 _enter("{%lu},%p{%pd},", dir->i_ino, dentry, dentry);
569 574
570 /* search the directory */ 575 /* search the directory */
571 ret = afs_dir_iterate(dir, &cookie.ctx, key); 576 ret = afs_dir_iterate(dir, &cookie.ctx, key, _dir_version);
572 if (ret < 0) { 577 if (ret < 0) {
573 _leave(" = %d [iter]", ret); 578 _leave(" = %d [iter]", ret);
574 return ret; 579 return ret;
@@ -642,6 +647,7 @@ static struct inode *afs_do_lookup(struct inode *dir, struct dentry *dentry,
642 struct afs_server *server; 647 struct afs_server *server;
643 struct afs_vnode *dvnode = AFS_FS_I(dir), *vnode; 648 struct afs_vnode *dvnode = AFS_FS_I(dir), *vnode;
644 struct inode *inode = NULL, *ti; 649 struct inode *inode = NULL, *ti;
650 afs_dataversion_t data_version = READ_ONCE(dvnode->status.data_version);
645 int ret, i; 651 int ret, i;
646 652
647 _enter("{%lu},%p{%pd},", dir->i_ino, dentry, dentry); 653 _enter("{%lu},%p{%pd},", dir->i_ino, dentry, dentry);
@@ -669,12 +675,14 @@ static struct inode *afs_do_lookup(struct inode *dir, struct dentry *dentry,
669 cookie->fids[i].vid = as->volume->vid; 675 cookie->fids[i].vid = as->volume->vid;
670 676
671 /* search the directory */ 677 /* search the directory */
672 ret = afs_dir_iterate(dir, &cookie->ctx, key); 678 ret = afs_dir_iterate(dir, &cookie->ctx, key, &data_version);
673 if (ret < 0) { 679 if (ret < 0) {
674 inode = ERR_PTR(ret); 680 inode = ERR_PTR(ret);
675 goto out; 681 goto out;
676 } 682 }
677 683
684 dentry->d_fsdata = (void *)(unsigned long)data_version;
685
678 inode = ERR_PTR(-ENOENT); 686 inode = ERR_PTR(-ENOENT);
679 if (!cookie->found) 687 if (!cookie->found)
680 goto out; 688 goto out;
@@ -951,7 +959,8 @@ static struct dentry *afs_lookup(struct inode *dir, struct dentry *dentry,
951 inode ? AFS_FS_I(inode) : NULL); 959 inode ? AFS_FS_I(inode) : NULL);
952 } else { 960 } else {
953 trace_afs_lookup(dvnode, &dentry->d_name, 961 trace_afs_lookup(dvnode, &dentry->d_name,
954 inode ? AFS_FS_I(inode) : NULL); 962 IS_ERR_OR_NULL(inode) ? NULL
963 : AFS_FS_I(inode));
955 } 964 }
956 return d; 965 return d;
957} 966}
@@ -968,7 +977,8 @@ static int afs_d_revalidate(struct dentry *dentry, unsigned int flags)
968 struct dentry *parent; 977 struct dentry *parent;
969 struct inode *inode; 978 struct inode *inode;
970 struct key *key; 979 struct key *key;
971 long dir_version, de_version; 980 afs_dataversion_t dir_version;
981 long de_version;
972 int ret; 982 int ret;
973 983
974 if (flags & LOOKUP_RCU) 984 if (flags & LOOKUP_RCU)
@@ -1014,20 +1024,20 @@ static int afs_d_revalidate(struct dentry *dentry, unsigned int flags)
1014 * on a 32-bit system, we only have 32 bits in the dentry to store the 1024 * on a 32-bit system, we only have 32 bits in the dentry to store the
1015 * version. 1025 * version.
1016 */ 1026 */
1017 dir_version = (long)dir->status.data_version; 1027 dir_version = dir->status.data_version;
1018 de_version = (long)dentry->d_fsdata; 1028 de_version = (long)dentry->d_fsdata;
1019 if (de_version == dir_version) 1029 if (de_version == (long)dir_version)
1020 goto out_valid; 1030 goto out_valid_noupdate;
1021 1031
1022 dir_version = (long)dir->invalid_before; 1032 dir_version = dir->invalid_before;
1023 if (de_version - dir_version >= 0) 1033 if (de_version - (long)dir_version >= 0)
1024 goto out_valid; 1034 goto out_valid;
1025 1035
1026 _debug("dir modified"); 1036 _debug("dir modified");
1027 afs_stat_v(dir, n_reval); 1037 afs_stat_v(dir, n_reval);
1028 1038
1029 /* search the directory for this vnode */ 1039 /* search the directory for this vnode */
1030 ret = afs_do_lookup_one(&dir->vfs_inode, dentry, &fid, key); 1040 ret = afs_do_lookup_one(&dir->vfs_inode, dentry, &fid, key, &dir_version);
1031 switch (ret) { 1041 switch (ret) {
1032 case 0: 1042 case 0:
1033 /* the filename maps to something */ 1043 /* the filename maps to something */
@@ -1080,7 +1090,8 @@ static int afs_d_revalidate(struct dentry *dentry, unsigned int flags)
1080 } 1090 }
1081 1091
1082out_valid: 1092out_valid:
1083 dentry->d_fsdata = (void *)dir_version; 1093 dentry->d_fsdata = (void *)(unsigned long)dir_version;
1094out_valid_noupdate:
1084 dput(parent); 1095 dput(parent);
1085 key_put(key); 1096 key_put(key);
1086 _leave(" = 1 [valid]"); 1097 _leave(" = 1 [valid]");
@@ -1186,6 +1197,20 @@ static void afs_prep_for_new_inode(struct afs_fs_cursor *fc,
1186} 1197}
1187 1198
1188/* 1199/*
1200 * Note that a dentry got changed. We need to set d_fsdata to the data version
1201 * number derived from the result of the operation. It doesn't matter if
1202 * d_fsdata goes backwards as we'll just revalidate.
1203 */
1204static void afs_update_dentry_version(struct afs_fs_cursor *fc,
1205 struct dentry *dentry,
1206 struct afs_status_cb *scb)
1207{
1208 if (fc->ac.error == 0)
1209 dentry->d_fsdata =
1210 (void *)(unsigned long)scb->status.data_version;
1211}
1212
1213/*
1189 * create a directory on an AFS filesystem 1214 * create a directory on an AFS filesystem
1190 */ 1215 */
1191static int afs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode) 1216static int afs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
@@ -1227,6 +1252,7 @@ static int afs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
1227 afs_check_for_remote_deletion(&fc, dvnode); 1252 afs_check_for_remote_deletion(&fc, dvnode);
1228 afs_vnode_commit_status(&fc, dvnode, fc.cb_break, 1253 afs_vnode_commit_status(&fc, dvnode, fc.cb_break,
1229 &data_version, &scb[0]); 1254 &data_version, &scb[0]);
1255 afs_update_dentry_version(&fc, dentry, &scb[0]);
1230 afs_vnode_new_inode(&fc, dentry, &iget_data, &scb[1]); 1256 afs_vnode_new_inode(&fc, dentry, &iget_data, &scb[1]);
1231 ret = afs_end_vnode_operation(&fc); 1257 ret = afs_end_vnode_operation(&fc);
1232 if (ret < 0) 1258 if (ret < 0)
@@ -1319,6 +1345,7 @@ static int afs_rmdir(struct inode *dir, struct dentry *dentry)
1319 1345
1320 afs_vnode_commit_status(&fc, dvnode, fc.cb_break, 1346 afs_vnode_commit_status(&fc, dvnode, fc.cb_break,
1321 &data_version, scb); 1347 &data_version, scb);
1348 afs_update_dentry_version(&fc, dentry, scb);
1322 ret = afs_end_vnode_operation(&fc); 1349 ret = afs_end_vnode_operation(&fc);
1323 if (ret == 0) { 1350 if (ret == 0) {
1324 afs_dir_remove_subdir(dentry); 1351 afs_dir_remove_subdir(dentry);
@@ -1458,6 +1485,7 @@ static int afs_unlink(struct inode *dir, struct dentry *dentry)
1458 &data_version, &scb[0]); 1485 &data_version, &scb[0]);
1459 afs_vnode_commit_status(&fc, vnode, fc.cb_break_2, 1486 afs_vnode_commit_status(&fc, vnode, fc.cb_break_2,
1460 &data_version_2, &scb[1]); 1487 &data_version_2, &scb[1]);
1488 afs_update_dentry_version(&fc, dentry, &scb[0]);
1461 ret = afs_end_vnode_operation(&fc); 1489 ret = afs_end_vnode_operation(&fc);
1462 if (ret == 0 && !(scb[1].have_status || scb[1].have_error)) 1490 if (ret == 0 && !(scb[1].have_status || scb[1].have_error))
1463 ret = afs_dir_remove_link(dvnode, dentry, key); 1491 ret = afs_dir_remove_link(dvnode, dentry, key);
@@ -1526,6 +1554,7 @@ static int afs_create(struct inode *dir, struct dentry *dentry, umode_t mode,
1526 afs_check_for_remote_deletion(&fc, dvnode); 1554 afs_check_for_remote_deletion(&fc, dvnode);
1527 afs_vnode_commit_status(&fc, dvnode, fc.cb_break, 1555 afs_vnode_commit_status(&fc, dvnode, fc.cb_break,
1528 &data_version, &scb[0]); 1556 &data_version, &scb[0]);
1557 afs_update_dentry_version(&fc, dentry, &scb[0]);
1529 afs_vnode_new_inode(&fc, dentry, &iget_data, &scb[1]); 1558 afs_vnode_new_inode(&fc, dentry, &iget_data, &scb[1]);
1530 ret = afs_end_vnode_operation(&fc); 1559 ret = afs_end_vnode_operation(&fc);
1531 if (ret < 0) 1560 if (ret < 0)
@@ -1607,6 +1636,7 @@ static int afs_link(struct dentry *from, struct inode *dir,
1607 afs_vnode_commit_status(&fc, vnode, fc.cb_break_2, 1636 afs_vnode_commit_status(&fc, vnode, fc.cb_break_2,
1608 NULL, &scb[1]); 1637 NULL, &scb[1]);
1609 ihold(&vnode->vfs_inode); 1638 ihold(&vnode->vfs_inode);
1639 afs_update_dentry_version(&fc, dentry, &scb[0]);
1610 d_instantiate(dentry, &vnode->vfs_inode); 1640 d_instantiate(dentry, &vnode->vfs_inode);
1611 1641
1612 mutex_unlock(&vnode->io_lock); 1642 mutex_unlock(&vnode->io_lock);
@@ -1686,6 +1716,7 @@ static int afs_symlink(struct inode *dir, struct dentry *dentry,
1686 afs_check_for_remote_deletion(&fc, dvnode); 1716 afs_check_for_remote_deletion(&fc, dvnode);
1687 afs_vnode_commit_status(&fc, dvnode, fc.cb_break, 1717 afs_vnode_commit_status(&fc, dvnode, fc.cb_break,
1688 &data_version, &scb[0]); 1718 &data_version, &scb[0]);
1719 afs_update_dentry_version(&fc, dentry, &scb[0]);
1689 afs_vnode_new_inode(&fc, dentry, &iget_data, &scb[1]); 1720 afs_vnode_new_inode(&fc, dentry, &iget_data, &scb[1]);
1690 ret = afs_end_vnode_operation(&fc); 1721 ret = afs_end_vnode_operation(&fc);
1691 if (ret < 0) 1722 if (ret < 0)
@@ -1791,6 +1822,17 @@ static int afs_rename(struct inode *old_dir, struct dentry *old_dentry,
1791 } 1822 }
1792 } 1823 }
1793 1824
1825 /* This bit is potentially nasty as there's a potential race with
1826 * afs_d_revalidate{,_rcu}(). We have to change d_fsdata on the dentry
1827 * to reflect it's new parent's new data_version after the op, but
1828 * d_revalidate may see old_dentry between the op having taken place
1829 * and the version being updated.
1830 *
1831 * So drop the old_dentry for now to make other threads go through
1832 * lookup instead - which we hold a lock against.
1833 */
1834 d_drop(old_dentry);
1835
1794 ret = -ERESTARTSYS; 1836 ret = -ERESTARTSYS;
1795 if (afs_begin_vnode_operation(&fc, orig_dvnode, key, true)) { 1837 if (afs_begin_vnode_operation(&fc, orig_dvnode, key, true)) {
1796 afs_dataversion_t orig_data_version; 1838 afs_dataversion_t orig_data_version;
@@ -1802,9 +1844,9 @@ static int afs_rename(struct inode *old_dir, struct dentry *old_dentry,
1802 if (orig_dvnode != new_dvnode) { 1844 if (orig_dvnode != new_dvnode) {
1803 if (mutex_lock_interruptible_nested(&new_dvnode->io_lock, 1) < 0) { 1845 if (mutex_lock_interruptible_nested(&new_dvnode->io_lock, 1) < 0) {
1804 afs_end_vnode_operation(&fc); 1846 afs_end_vnode_operation(&fc);
1805 goto error_rehash; 1847 goto error_rehash_old;
1806 } 1848 }
1807 new_data_version = new_dvnode->status.data_version; 1849 new_data_version = new_dvnode->status.data_version + 1;
1808 } else { 1850 } else {
1809 new_data_version = orig_data_version; 1851 new_data_version = orig_data_version;
1810 new_scb = &scb[0]; 1852 new_scb = &scb[0];
@@ -1827,7 +1869,7 @@ static int afs_rename(struct inode *old_dir, struct dentry *old_dentry,
1827 } 1869 }
1828 ret = afs_end_vnode_operation(&fc); 1870 ret = afs_end_vnode_operation(&fc);
1829 if (ret < 0) 1871 if (ret < 0)
1830 goto error_rehash; 1872 goto error_rehash_old;
1831 } 1873 }
1832 1874
1833 if (ret == 0) { 1875 if (ret == 0) {
@@ -1853,10 +1895,26 @@ static int afs_rename(struct inode *old_dir, struct dentry *old_dentry,
1853 drop_nlink(new_inode); 1895 drop_nlink(new_inode);
1854 spin_unlock(&new_inode->i_lock); 1896 spin_unlock(&new_inode->i_lock);
1855 } 1897 }
1898
1899 /* Now we can update d_fsdata on the dentries to reflect their
1900 * new parent's data_version.
1901 *
1902 * Note that if we ever implement RENAME_EXCHANGE, we'll have
1903 * to update both dentries with opposing dir versions.
1904 */
1905 if (new_dvnode != orig_dvnode) {
1906 afs_update_dentry_version(&fc, old_dentry, &scb[1]);
1907 afs_update_dentry_version(&fc, new_dentry, &scb[1]);
1908 } else {
1909 afs_update_dentry_version(&fc, old_dentry, &scb[0]);
1910 afs_update_dentry_version(&fc, new_dentry, &scb[0]);
1911 }
1856 d_move(old_dentry, new_dentry); 1912 d_move(old_dentry, new_dentry);
1857 goto error_tmp; 1913 goto error_tmp;
1858 } 1914 }
1859 1915
1916error_rehash_old:
1917 d_rehash(new_dentry);
1860error_rehash: 1918error_rehash:
1861 if (rehash) 1919 if (rehash)
1862 d_rehash(rehash); 1920 d_rehash(rehash);
diff --git a/fs/afs/file.c b/fs/afs/file.c
index 56b69576274d..dd3c55c9101c 100644
--- a/fs/afs/file.c
+++ b/fs/afs/file.c
@@ -191,11 +191,13 @@ void afs_put_read(struct afs_read *req)
191 int i; 191 int i;
192 192
193 if (refcount_dec_and_test(&req->usage)) { 193 if (refcount_dec_and_test(&req->usage)) {
194 for (i = 0; i < req->nr_pages; i++) 194 if (req->pages) {
195 if (req->pages[i]) 195 for (i = 0; i < req->nr_pages; i++)
196 put_page(req->pages[i]); 196 if (req->pages[i])
197 if (req->pages != req->array) 197 put_page(req->pages[i]);
198 kfree(req->pages); 198 if (req->pages != req->array)
199 kfree(req->pages);
200 }
199 kfree(req); 201 kfree(req);
200 } 202 }
201} 203}
diff --git a/fs/afs/fsclient.c b/fs/afs/fsclient.c
index 1ce73e014139..114f281f3687 100644
--- a/fs/afs/fsclient.c
+++ b/fs/afs/fsclient.c
@@ -339,8 +339,9 @@ static int afs_deliver_fs_fetch_data(struct afs_call *call)
339 call->tmp_u = htonl(0); 339 call->tmp_u = htonl(0);
340 afs_extract_to_tmp(call); 340 afs_extract_to_tmp(call);
341 } 341 }
342 /* Fall through */
342 343
343 /* Fall through - and extract the returned data length */ 344 /* extract the returned data length */
344 case 1: 345 case 1:
345 _debug("extract data length"); 346 _debug("extract data length");
346 ret = afs_extract_data(call, true); 347 ret = afs_extract_data(call, true);
@@ -366,8 +367,9 @@ static int afs_deliver_fs_fetch_data(struct afs_call *call)
366 call->bvec[0].bv_page = req->pages[req->index]; 367 call->bvec[0].bv_page = req->pages[req->index];
367 iov_iter_bvec(&call->iter, READ, call->bvec, 1, size); 368 iov_iter_bvec(&call->iter, READ, call->bvec, 1, size);
368 ASSERTCMP(size, <=, PAGE_SIZE); 369 ASSERTCMP(size, <=, PAGE_SIZE);
370 /* Fall through */
369 371
370 /* Fall through - and extract the returned data */ 372 /* extract the returned data */
371 case 2: 373 case 2:
372 _debug("extract data %zu/%llu", 374 _debug("extract data %zu/%llu",
373 iov_iter_count(&call->iter), req->remain); 375 iov_iter_count(&call->iter), req->remain);
@@ -394,8 +396,8 @@ static int afs_deliver_fs_fetch_data(struct afs_call *call)
394 /* Discard any excess data the server gave us */ 396 /* Discard any excess data the server gave us */
395 iov_iter_discard(&call->iter, READ, req->actual_len - req->len); 397 iov_iter_discard(&call->iter, READ, req->actual_len - req->len);
396 call->unmarshall = 3; 398 call->unmarshall = 3;
397
398 /* Fall through */ 399 /* Fall through */
400
399 case 3: 401 case 3:
400 _debug("extract discard %zu/%llu", 402 _debug("extract discard %zu/%llu",
401 iov_iter_count(&call->iter), req->actual_len - req->len); 403 iov_iter_count(&call->iter), req->actual_len - req->len);
@@ -407,8 +409,9 @@ static int afs_deliver_fs_fetch_data(struct afs_call *call)
407 no_more_data: 409 no_more_data:
408 call->unmarshall = 4; 410 call->unmarshall = 4;
409 afs_extract_to_buf(call, (21 + 3 + 6) * 4); 411 afs_extract_to_buf(call, (21 + 3 + 6) * 4);
412 /* Fall through */
410 413
411 /* Fall through - and extract the metadata */ 414 /* extract the metadata */
412 case 4: 415 case 4:
413 ret = afs_extract_data(call, false); 416 ret = afs_extract_data(call, false);
414 if (ret < 0) 417 if (ret < 0)
@@ -1471,8 +1474,9 @@ static int afs_deliver_fs_get_volume_status(struct afs_call *call)
1471 case 0: 1474 case 0:
1472 call->unmarshall++; 1475 call->unmarshall++;
1473 afs_extract_to_buf(call, 12 * 4); 1476 afs_extract_to_buf(call, 12 * 4);
1477 /* Fall through */
1474 1478
1475 /* Fall through - and extract the returned status record */ 1479 /* extract the returned status record */
1476 case 1: 1480 case 1:
1477 _debug("extract status"); 1481 _debug("extract status");
1478 ret = afs_extract_data(call, true); 1482 ret = afs_extract_data(call, true);
@@ -1483,8 +1487,9 @@ static int afs_deliver_fs_get_volume_status(struct afs_call *call)
1483 xdr_decode_AFSFetchVolumeStatus(&bp, call->out_volstatus); 1487 xdr_decode_AFSFetchVolumeStatus(&bp, call->out_volstatus);
1484 call->unmarshall++; 1488 call->unmarshall++;
1485 afs_extract_to_tmp(call); 1489 afs_extract_to_tmp(call);
1490 /* Fall through */
1486 1491
1487 /* Fall through - and extract the volume name length */ 1492 /* extract the volume name length */
1488 case 2: 1493 case 2:
1489 ret = afs_extract_data(call, true); 1494 ret = afs_extract_data(call, true);
1490 if (ret < 0) 1495 if (ret < 0)
@@ -1498,8 +1503,9 @@ static int afs_deliver_fs_get_volume_status(struct afs_call *call)
1498 size = (call->count + 3) & ~3; /* It's padded */ 1503 size = (call->count + 3) & ~3; /* It's padded */
1499 afs_extract_to_buf(call, size); 1504 afs_extract_to_buf(call, size);
1500 call->unmarshall++; 1505 call->unmarshall++;
1506 /* Fall through */
1501 1507
1502 /* Fall through - and extract the volume name */ 1508 /* extract the volume name */
1503 case 3: 1509 case 3:
1504 _debug("extract volname"); 1510 _debug("extract volname");
1505 ret = afs_extract_data(call, true); 1511 ret = afs_extract_data(call, true);
@@ -1511,8 +1517,9 @@ static int afs_deliver_fs_get_volume_status(struct afs_call *call)
1511 _debug("volname '%s'", p); 1517 _debug("volname '%s'", p);
1512 afs_extract_to_tmp(call); 1518 afs_extract_to_tmp(call);
1513 call->unmarshall++; 1519 call->unmarshall++;
1520 /* Fall through */
1514 1521
1515 /* Fall through - and extract the offline message length */ 1522 /* extract the offline message length */
1516 case 4: 1523 case 4:
1517 ret = afs_extract_data(call, true); 1524 ret = afs_extract_data(call, true);
1518 if (ret < 0) 1525 if (ret < 0)
@@ -1526,8 +1533,9 @@ static int afs_deliver_fs_get_volume_status(struct afs_call *call)
1526 size = (call->count + 3) & ~3; /* It's padded */ 1533 size = (call->count + 3) & ~3; /* It's padded */
1527 afs_extract_to_buf(call, size); 1534 afs_extract_to_buf(call, size);
1528 call->unmarshall++; 1535 call->unmarshall++;
1536 /* Fall through */
1529 1537
1530 /* Fall through - and extract the offline message */ 1538 /* extract the offline message */
1531 case 5: 1539 case 5:
1532 _debug("extract offline"); 1540 _debug("extract offline");
1533 ret = afs_extract_data(call, true); 1541 ret = afs_extract_data(call, true);
@@ -1540,8 +1548,9 @@ static int afs_deliver_fs_get_volume_status(struct afs_call *call)
1540 1548
1541 afs_extract_to_tmp(call); 1549 afs_extract_to_tmp(call);
1542 call->unmarshall++; 1550 call->unmarshall++;
1551 /* Fall through */
1543 1552
1544 /* Fall through - and extract the message of the day length */ 1553 /* extract the message of the day length */
1545 case 6: 1554 case 6:
1546 ret = afs_extract_data(call, true); 1555 ret = afs_extract_data(call, true);
1547 if (ret < 0) 1556 if (ret < 0)
@@ -1555,8 +1564,9 @@ static int afs_deliver_fs_get_volume_status(struct afs_call *call)
1555 size = (call->count + 3) & ~3; /* It's padded */ 1564 size = (call->count + 3) & ~3; /* It's padded */
1556 afs_extract_to_buf(call, size); 1565 afs_extract_to_buf(call, size);
1557 call->unmarshall++; 1566 call->unmarshall++;
1567 /* Fall through */
1558 1568
1559 /* Fall through - and extract the message of the day */ 1569 /* extract the message of the day */
1560 case 7: 1570 case 7:
1561 _debug("extract motd"); 1571 _debug("extract motd");
1562 ret = afs_extract_data(call, false); 1572 ret = afs_extract_data(call, false);
@@ -1850,8 +1860,9 @@ static int afs_deliver_fs_get_capabilities(struct afs_call *call)
1850 case 0: 1860 case 0:
1851 afs_extract_to_tmp(call); 1861 afs_extract_to_tmp(call);
1852 call->unmarshall++; 1862 call->unmarshall++;
1863 /* Fall through */
1853 1864
1854 /* Fall through - and extract the capabilities word count */ 1865 /* Extract the capabilities word count */
1855 case 1: 1866 case 1:
1856 ret = afs_extract_data(call, true); 1867 ret = afs_extract_data(call, true);
1857 if (ret < 0) 1868 if (ret < 0)
@@ -1863,8 +1874,9 @@ static int afs_deliver_fs_get_capabilities(struct afs_call *call)
1863 call->count2 = count; 1874 call->count2 = count;
1864 iov_iter_discard(&call->iter, READ, count * sizeof(__be32)); 1875 iov_iter_discard(&call->iter, READ, count * sizeof(__be32));
1865 call->unmarshall++; 1876 call->unmarshall++;
1877 /* Fall through */
1866 1878
1867 /* Fall through - and extract capabilities words */ 1879 /* Extract capabilities words */
1868 case 2: 1880 case 2:
1869 ret = afs_extract_data(call, false); 1881 ret = afs_extract_data(call, false);
1870 if (ret < 0) 1882 if (ret < 0)
@@ -2020,9 +2032,9 @@ static int afs_deliver_fs_inline_bulk_status(struct afs_call *call)
2020 case 0: 2032 case 0:
2021 afs_extract_to_tmp(call); 2033 afs_extract_to_tmp(call);
2022 call->unmarshall++; 2034 call->unmarshall++;
2035 /* Fall through */
2023 2036
2024 /* Extract the file status count and array in two steps */ 2037 /* Extract the file status count and array in two steps */
2025 /* Fall through */
2026 case 1: 2038 case 1:
2027 _debug("extract status count"); 2039 _debug("extract status count");
2028 ret = afs_extract_data(call, true); 2040 ret = afs_extract_data(call, true);
@@ -2039,8 +2051,8 @@ static int afs_deliver_fs_inline_bulk_status(struct afs_call *call)
2039 call->unmarshall++; 2051 call->unmarshall++;
2040 more_counts: 2052 more_counts:
2041 afs_extract_to_buf(call, 21 * sizeof(__be32)); 2053 afs_extract_to_buf(call, 21 * sizeof(__be32));
2042
2043 /* Fall through */ 2054 /* Fall through */
2055
2044 case 2: 2056 case 2:
2045 _debug("extract status array %u", call->count); 2057 _debug("extract status array %u", call->count);
2046 ret = afs_extract_data(call, true); 2058 ret = afs_extract_data(call, true);
@@ -2060,9 +2072,9 @@ static int afs_deliver_fs_inline_bulk_status(struct afs_call *call)
2060 call->count = 0; 2072 call->count = 0;
2061 call->unmarshall++; 2073 call->unmarshall++;
2062 afs_extract_to_tmp(call); 2074 afs_extract_to_tmp(call);
2075 /* Fall through */
2063 2076
2064 /* Extract the callback count and array in two steps */ 2077 /* Extract the callback count and array in two steps */
2065 /* Fall through */
2066 case 3: 2078 case 3:
2067 _debug("extract CB count"); 2079 _debug("extract CB count");
2068 ret = afs_extract_data(call, true); 2080 ret = afs_extract_data(call, true);
@@ -2078,8 +2090,8 @@ static int afs_deliver_fs_inline_bulk_status(struct afs_call *call)
2078 call->unmarshall++; 2090 call->unmarshall++;
2079 more_cbs: 2091 more_cbs:
2080 afs_extract_to_buf(call, 3 * sizeof(__be32)); 2092 afs_extract_to_buf(call, 3 * sizeof(__be32));
2081
2082 /* Fall through */ 2093 /* Fall through */
2094
2083 case 4: 2095 case 4:
2084 _debug("extract CB array"); 2096 _debug("extract CB array");
2085 ret = afs_extract_data(call, true); 2097 ret = afs_extract_data(call, true);
@@ -2096,8 +2108,8 @@ static int afs_deliver_fs_inline_bulk_status(struct afs_call *call)
2096 2108
2097 afs_extract_to_buf(call, 6 * sizeof(__be32)); 2109 afs_extract_to_buf(call, 6 * sizeof(__be32));
2098 call->unmarshall++; 2110 call->unmarshall++;
2099
2100 /* Fall through */ 2111 /* Fall through */
2112
2101 case 5: 2113 case 5:
2102 ret = afs_extract_data(call, false); 2114 ret = afs_extract_data(call, false);
2103 if (ret < 0) 2115 if (ret < 0)
@@ -2193,6 +2205,7 @@ static int afs_deliver_fs_fetch_acl(struct afs_call *call)
2193 case 0: 2205 case 0:
2194 afs_extract_to_tmp(call); 2206 afs_extract_to_tmp(call);
2195 call->unmarshall++; 2207 call->unmarshall++;
2208 /* Fall through */
2196 2209
2197 /* extract the returned data length */ 2210 /* extract the returned data length */
2198 case 1: 2211 case 1:
@@ -2210,6 +2223,7 @@ static int afs_deliver_fs_fetch_acl(struct afs_call *call)
2210 acl->size = call->count2; 2223 acl->size = call->count2;
2211 afs_extract_begin(call, acl->data, size); 2224 afs_extract_begin(call, acl->data, size);
2212 call->unmarshall++; 2225 call->unmarshall++;
2226 /* Fall through */
2213 2227
2214 /* extract the returned data */ 2228 /* extract the returned data */
2215 case 2: 2229 case 2:
@@ -2219,6 +2233,7 @@ static int afs_deliver_fs_fetch_acl(struct afs_call *call)
2219 2233
2220 afs_extract_to_buf(call, (21 + 6) * 4); 2234 afs_extract_to_buf(call, (21 + 6) * 4);
2221 call->unmarshall++; 2235 call->unmarshall++;
2236 /* Fall through */
2222 2237
2223 /* extract the metadata */ 2238 /* extract the metadata */
2224 case 3: 2239 case 3:
diff --git a/fs/afs/vlclient.c b/fs/afs/vlclient.c
index d7e0fd3c00df..cfb0ac4bd039 100644
--- a/fs/afs/vlclient.c
+++ b/fs/afs/vlclient.c
@@ -56,23 +56,24 @@ static int afs_deliver_vl_get_entry_by_name_u(struct afs_call *call)
56 struct afs_uuid__xdr *xdr; 56 struct afs_uuid__xdr *xdr;
57 struct afs_uuid *uuid; 57 struct afs_uuid *uuid;
58 int j; 58 int j;
59 int n = entry->nr_servers;
59 60
60 tmp = ntohl(uvldb->serverFlags[i]); 61 tmp = ntohl(uvldb->serverFlags[i]);
61 if (tmp & AFS_VLSF_DONTUSE || 62 if (tmp & AFS_VLSF_DONTUSE ||
62 (new_only && !(tmp & AFS_VLSF_NEWREPSITE))) 63 (new_only && !(tmp & AFS_VLSF_NEWREPSITE)))
63 continue; 64 continue;
64 if (tmp & AFS_VLSF_RWVOL) { 65 if (tmp & AFS_VLSF_RWVOL) {
65 entry->fs_mask[i] |= AFS_VOL_VTM_RW; 66 entry->fs_mask[n] |= AFS_VOL_VTM_RW;
66 if (vlflags & AFS_VLF_BACKEXISTS) 67 if (vlflags & AFS_VLF_BACKEXISTS)
67 entry->fs_mask[i] |= AFS_VOL_VTM_BAK; 68 entry->fs_mask[n] |= AFS_VOL_VTM_BAK;
68 } 69 }
69 if (tmp & AFS_VLSF_ROVOL) 70 if (tmp & AFS_VLSF_ROVOL)
70 entry->fs_mask[i] |= AFS_VOL_VTM_RO; 71 entry->fs_mask[n] |= AFS_VOL_VTM_RO;
71 if (!entry->fs_mask[i]) 72 if (!entry->fs_mask[n])
72 continue; 73 continue;
73 74
74 xdr = &uvldb->serverNumber[i]; 75 xdr = &uvldb->serverNumber[i];
75 uuid = (struct afs_uuid *)&entry->fs_server[i]; 76 uuid = (struct afs_uuid *)&entry->fs_server[n];
76 uuid->time_low = xdr->time_low; 77 uuid->time_low = xdr->time_low;
77 uuid->time_mid = htons(ntohl(xdr->time_mid)); 78 uuid->time_mid = htons(ntohl(xdr->time_mid));
78 uuid->time_hi_and_version = htons(ntohl(xdr->time_hi_and_version)); 79 uuid->time_hi_and_version = htons(ntohl(xdr->time_hi_and_version));
diff --git a/fs/afs/yfsclient.c b/fs/afs/yfsclient.c
index 18722aaeda33..ca2452806ebf 100644
--- a/fs/afs/yfsclient.c
+++ b/fs/afs/yfsclient.c
@@ -450,8 +450,9 @@ static int yfs_deliver_fs_fetch_data64(struct afs_call *call)
450 req->offset = req->pos & (PAGE_SIZE - 1); 450 req->offset = req->pos & (PAGE_SIZE - 1);
451 afs_extract_to_tmp64(call); 451 afs_extract_to_tmp64(call);
452 call->unmarshall++; 452 call->unmarshall++;
453 /* Fall through */
453 454
454 /* Fall through - and extract the returned data length */ 455 /* extract the returned data length */
455 case 1: 456 case 1:
456 _debug("extract data length"); 457 _debug("extract data length");
457 ret = afs_extract_data(call, true); 458 ret = afs_extract_data(call, true);
@@ -477,8 +478,9 @@ static int yfs_deliver_fs_fetch_data64(struct afs_call *call)
477 call->bvec[0].bv_page = req->pages[req->index]; 478 call->bvec[0].bv_page = req->pages[req->index];
478 iov_iter_bvec(&call->iter, READ, call->bvec, 1, size); 479 iov_iter_bvec(&call->iter, READ, call->bvec, 1, size);
479 ASSERTCMP(size, <=, PAGE_SIZE); 480 ASSERTCMP(size, <=, PAGE_SIZE);
481 /* Fall through */
480 482
481 /* Fall through - and extract the returned data */ 483 /* extract the returned data */
482 case 2: 484 case 2:
483 _debug("extract data %zu/%llu", 485 _debug("extract data %zu/%llu",
484 iov_iter_count(&call->iter), req->remain); 486 iov_iter_count(&call->iter), req->remain);
@@ -505,8 +507,8 @@ static int yfs_deliver_fs_fetch_data64(struct afs_call *call)
505 /* Discard any excess data the server gave us */ 507 /* Discard any excess data the server gave us */
506 iov_iter_discard(&call->iter, READ, req->actual_len - req->len); 508 iov_iter_discard(&call->iter, READ, req->actual_len - req->len);
507 call->unmarshall = 3; 509 call->unmarshall = 3;
508
509 /* Fall through */ 510 /* Fall through */
511
510 case 3: 512 case 3:
511 _debug("extract discard %zu/%llu", 513 _debug("extract discard %zu/%llu",
512 iov_iter_count(&call->iter), req->actual_len - req->len); 514 iov_iter_count(&call->iter), req->actual_len - req->len);
@@ -521,8 +523,9 @@ static int yfs_deliver_fs_fetch_data64(struct afs_call *call)
521 sizeof(struct yfs_xdr_YFSFetchStatus) + 523 sizeof(struct yfs_xdr_YFSFetchStatus) +
522 sizeof(struct yfs_xdr_YFSCallBack) + 524 sizeof(struct yfs_xdr_YFSCallBack) +
523 sizeof(struct yfs_xdr_YFSVolSync)); 525 sizeof(struct yfs_xdr_YFSVolSync));
526 /* Fall through */
524 527
525 /* Fall through - and extract the metadata */ 528 /* extract the metadata */
526 case 4: 529 case 4:
527 ret = afs_extract_data(call, false); 530 ret = afs_extract_data(call, false);
528 if (ret < 0) 531 if (ret < 0)
@@ -539,8 +542,8 @@ static int yfs_deliver_fs_fetch_data64(struct afs_call *call)
539 req->file_size = call->out_scb->status.size; 542 req->file_size = call->out_scb->status.size;
540 543
541 call->unmarshall++; 544 call->unmarshall++;
542
543 /* Fall through */ 545 /* Fall through */
546
544 case 5: 547 case 5:
545 break; 548 break;
546 } 549 }
@@ -1429,8 +1432,9 @@ static int yfs_deliver_fs_get_volume_status(struct afs_call *call)
1429 case 0: 1432 case 0:
1430 call->unmarshall++; 1433 call->unmarshall++;
1431 afs_extract_to_buf(call, sizeof(struct yfs_xdr_YFSFetchVolumeStatus)); 1434 afs_extract_to_buf(call, sizeof(struct yfs_xdr_YFSFetchVolumeStatus));
1435 /* Fall through */
1432 1436
1433 /* Fall through - and extract the returned status record */ 1437 /* extract the returned status record */
1434 case 1: 1438 case 1:
1435 _debug("extract status"); 1439 _debug("extract status");
1436 ret = afs_extract_data(call, true); 1440 ret = afs_extract_data(call, true);
@@ -1441,8 +1445,9 @@ static int yfs_deliver_fs_get_volume_status(struct afs_call *call)
1441 xdr_decode_YFSFetchVolumeStatus(&bp, call->out_volstatus); 1445 xdr_decode_YFSFetchVolumeStatus(&bp, call->out_volstatus);
1442 call->unmarshall++; 1446 call->unmarshall++;
1443 afs_extract_to_tmp(call); 1447 afs_extract_to_tmp(call);
1448 /* Fall through */
1444 1449
1445 /* Fall through - and extract the volume name length */ 1450 /* extract the volume name length */
1446 case 2: 1451 case 2:
1447 ret = afs_extract_data(call, true); 1452 ret = afs_extract_data(call, true);
1448 if (ret < 0) 1453 if (ret < 0)
@@ -1456,8 +1461,9 @@ static int yfs_deliver_fs_get_volume_status(struct afs_call *call)
1456 size = (call->count + 3) & ~3; /* It's padded */ 1461 size = (call->count + 3) & ~3; /* It's padded */
1457 afs_extract_to_buf(call, size); 1462 afs_extract_to_buf(call, size);
1458 call->unmarshall++; 1463 call->unmarshall++;
1464 /* Fall through */
1459 1465
1460 /* Fall through - and extract the volume name */ 1466 /* extract the volume name */
1461 case 3: 1467 case 3:
1462 _debug("extract volname"); 1468 _debug("extract volname");
1463 ret = afs_extract_data(call, true); 1469 ret = afs_extract_data(call, true);
@@ -1469,8 +1475,9 @@ static int yfs_deliver_fs_get_volume_status(struct afs_call *call)
1469 _debug("volname '%s'", p); 1475 _debug("volname '%s'", p);
1470 afs_extract_to_tmp(call); 1476 afs_extract_to_tmp(call);
1471 call->unmarshall++; 1477 call->unmarshall++;
1478 /* Fall through */
1472 1479
1473 /* Fall through - and extract the offline message length */ 1480 /* extract the offline message length */
1474 case 4: 1481 case 4:
1475 ret = afs_extract_data(call, true); 1482 ret = afs_extract_data(call, true);
1476 if (ret < 0) 1483 if (ret < 0)
@@ -1484,8 +1491,9 @@ static int yfs_deliver_fs_get_volume_status(struct afs_call *call)
1484 size = (call->count + 3) & ~3; /* It's padded */ 1491 size = (call->count + 3) & ~3; /* It's padded */
1485 afs_extract_to_buf(call, size); 1492 afs_extract_to_buf(call, size);
1486 call->unmarshall++; 1493 call->unmarshall++;
1494 /* Fall through */
1487 1495
1488 /* Fall through - and extract the offline message */ 1496 /* extract the offline message */
1489 case 5: 1497 case 5:
1490 _debug("extract offline"); 1498 _debug("extract offline");
1491 ret = afs_extract_data(call, true); 1499 ret = afs_extract_data(call, true);
@@ -1498,8 +1506,9 @@ static int yfs_deliver_fs_get_volume_status(struct afs_call *call)
1498 1506
1499 afs_extract_to_tmp(call); 1507 afs_extract_to_tmp(call);
1500 call->unmarshall++; 1508 call->unmarshall++;
1509 /* Fall through */
1501 1510
1502 /* Fall through - and extract the message of the day length */ 1511 /* extract the message of the day length */
1503 case 6: 1512 case 6:
1504 ret = afs_extract_data(call, true); 1513 ret = afs_extract_data(call, true);
1505 if (ret < 0) 1514 if (ret < 0)
@@ -1513,8 +1522,9 @@ static int yfs_deliver_fs_get_volume_status(struct afs_call *call)
1513 size = (call->count + 3) & ~3; /* It's padded */ 1522 size = (call->count + 3) & ~3; /* It's padded */
1514 afs_extract_to_buf(call, size); 1523 afs_extract_to_buf(call, size);
1515 call->unmarshall++; 1524 call->unmarshall++;
1525 /* Fall through */
1516 1526
1517 /* Fall through - and extract the message of the day */ 1527 /* extract the message of the day */
1518 case 7: 1528 case 7:
1519 _debug("extract motd"); 1529 _debug("extract motd");
1520 ret = afs_extract_data(call, false); 1530 ret = afs_extract_data(call, false);
@@ -1526,8 +1536,8 @@ static int yfs_deliver_fs_get_volume_status(struct afs_call *call)
1526 _debug("motd '%s'", p); 1536 _debug("motd '%s'", p);
1527 1537
1528 call->unmarshall++; 1538 call->unmarshall++;
1529
1530 /* Fall through */ 1539 /* Fall through */
1540
1531 case 8: 1541 case 8:
1532 break; 1542 break;
1533 } 1543 }
@@ -1805,9 +1815,9 @@ static int yfs_deliver_fs_inline_bulk_status(struct afs_call *call)
1805 case 0: 1815 case 0:
1806 afs_extract_to_tmp(call); 1816 afs_extract_to_tmp(call);
1807 call->unmarshall++; 1817 call->unmarshall++;
1818 /* Fall through */
1808 1819
1809 /* Extract the file status count and array in two steps */ 1820 /* Extract the file status count and array in two steps */
1810 /* Fall through */
1811 case 1: 1821 case 1:
1812 _debug("extract status count"); 1822 _debug("extract status count");
1813 ret = afs_extract_data(call, true); 1823 ret = afs_extract_data(call, true);
@@ -1824,8 +1834,8 @@ static int yfs_deliver_fs_inline_bulk_status(struct afs_call *call)
1824 call->unmarshall++; 1834 call->unmarshall++;
1825 more_counts: 1835 more_counts:
1826 afs_extract_to_buf(call, sizeof(struct yfs_xdr_YFSFetchStatus)); 1836 afs_extract_to_buf(call, sizeof(struct yfs_xdr_YFSFetchStatus));
1827
1828 /* Fall through */ 1837 /* Fall through */
1838
1829 case 2: 1839 case 2:
1830 _debug("extract status array %u", call->count); 1840 _debug("extract status array %u", call->count);
1831 ret = afs_extract_data(call, true); 1841 ret = afs_extract_data(call, true);
@@ -1845,9 +1855,9 @@ static int yfs_deliver_fs_inline_bulk_status(struct afs_call *call)
1845 call->count = 0; 1855 call->count = 0;
1846 call->unmarshall++; 1856 call->unmarshall++;
1847 afs_extract_to_tmp(call); 1857 afs_extract_to_tmp(call);
1858 /* Fall through */
1848 1859
1849 /* Extract the callback count and array in two steps */ 1860 /* Extract the callback count and array in two steps */
1850 /* Fall through */
1851 case 3: 1861 case 3:
1852 _debug("extract CB count"); 1862 _debug("extract CB count");
1853 ret = afs_extract_data(call, true); 1863 ret = afs_extract_data(call, true);
@@ -1863,8 +1873,8 @@ static int yfs_deliver_fs_inline_bulk_status(struct afs_call *call)
1863 call->unmarshall++; 1873 call->unmarshall++;
1864 more_cbs: 1874 more_cbs:
1865 afs_extract_to_buf(call, sizeof(struct yfs_xdr_YFSCallBack)); 1875 afs_extract_to_buf(call, sizeof(struct yfs_xdr_YFSCallBack));
1866
1867 /* Fall through */ 1876 /* Fall through */
1877
1868 case 4: 1878 case 4:
1869 _debug("extract CB array"); 1879 _debug("extract CB array");
1870 ret = afs_extract_data(call, true); 1880 ret = afs_extract_data(call, true);
@@ -1881,8 +1891,8 @@ static int yfs_deliver_fs_inline_bulk_status(struct afs_call *call)
1881 1891
1882 afs_extract_to_buf(call, sizeof(struct yfs_xdr_YFSVolSync)); 1892 afs_extract_to_buf(call, sizeof(struct yfs_xdr_YFSVolSync));
1883 call->unmarshall++; 1893 call->unmarshall++;
1884
1885 /* Fall through */ 1894 /* Fall through */
1895
1886 case 5: 1896 case 5:
1887 ret = afs_extract_data(call, false); 1897 ret = afs_extract_data(call, false);
1888 if (ret < 0) 1898 if (ret < 0)
@@ -1892,8 +1902,8 @@ static int yfs_deliver_fs_inline_bulk_status(struct afs_call *call)
1892 xdr_decode_YFSVolSync(&bp, call->out_volsync); 1902 xdr_decode_YFSVolSync(&bp, call->out_volsync);
1893 1903
1894 call->unmarshall++; 1904 call->unmarshall++;
1895
1896 /* Fall through */ 1905 /* Fall through */
1906
1897 case 6: 1907 case 6:
1898 break; 1908 break;
1899 } 1909 }
@@ -1978,6 +1988,7 @@ static int yfs_deliver_fs_fetch_opaque_acl(struct afs_call *call)
1978 case 0: 1988 case 0:
1979 afs_extract_to_tmp(call); 1989 afs_extract_to_tmp(call);
1980 call->unmarshall++; 1990 call->unmarshall++;
1991 /* Fall through */
1981 1992
1982 /* Extract the file ACL length */ 1993 /* Extract the file ACL length */
1983 case 1: 1994 case 1:
@@ -1999,6 +2010,7 @@ static int yfs_deliver_fs_fetch_opaque_acl(struct afs_call *call)
1999 iov_iter_discard(&call->iter, READ, size); 2010 iov_iter_discard(&call->iter, READ, size);
2000 } 2011 }
2001 call->unmarshall++; 2012 call->unmarshall++;
2013 /* Fall through */
2002 2014
2003 /* Extract the file ACL */ 2015 /* Extract the file ACL */
2004 case 2: 2016 case 2:
@@ -2008,6 +2020,7 @@ static int yfs_deliver_fs_fetch_opaque_acl(struct afs_call *call)
2008 2020
2009 afs_extract_to_tmp(call); 2021 afs_extract_to_tmp(call);
2010 call->unmarshall++; 2022 call->unmarshall++;
2023 /* Fall through */
2011 2024
2012 /* Extract the volume ACL length */ 2025 /* Extract the volume ACL length */
2013 case 3: 2026 case 3:
@@ -2029,6 +2042,7 @@ static int yfs_deliver_fs_fetch_opaque_acl(struct afs_call *call)
2029 iov_iter_discard(&call->iter, READ, size); 2042 iov_iter_discard(&call->iter, READ, size);
2030 } 2043 }
2031 call->unmarshall++; 2044 call->unmarshall++;
2045 /* Fall through */
2032 2046
2033 /* Extract the volume ACL */ 2047 /* Extract the volume ACL */
2034 case 4: 2048 case 4:
@@ -2041,6 +2055,7 @@ static int yfs_deliver_fs_fetch_opaque_acl(struct afs_call *call)
2041 sizeof(struct yfs_xdr_YFSFetchStatus) + 2055 sizeof(struct yfs_xdr_YFSFetchStatus) +
2042 sizeof(struct yfs_xdr_YFSVolSync)); 2056 sizeof(struct yfs_xdr_YFSVolSync));
2043 call->unmarshall++; 2057 call->unmarshall++;
2058 /* Fall through */
2044 2059
2045 /* extract the metadata */ 2060 /* extract the metadata */
2046 case 5: 2061 case 5:
@@ -2057,6 +2072,7 @@ static int yfs_deliver_fs_fetch_opaque_acl(struct afs_call *call)
2057 xdr_decode_YFSVolSync(&bp, call->out_volsync); 2072 xdr_decode_YFSVolSync(&bp, call->out_volsync);
2058 2073
2059 call->unmarshall++; 2074 call->unmarshall++;
2075 /* Fall through */
2060 2076
2061 case 6: 2077 case 6:
2062 break; 2078 break;
@@ -2155,7 +2171,7 @@ int yfs_fs_store_opaque_acl2(struct afs_fs_cursor *fc, const struct afs_acl *acl
2155 key_serial(fc->key), vnode->fid.vid, vnode->fid.vnode); 2171 key_serial(fc->key), vnode->fid.vid, vnode->fid.vnode);
2156 2172
2157 size = round_up(acl->size, 4); 2173 size = round_up(acl->size, 4);
2158 call = afs_alloc_flat_call(net, &yfs_RXYFSStoreStatus, 2174 call = afs_alloc_flat_call(net, &yfs_RXYFSStoreOpaqueACL2,
2159 sizeof(__be32) * 2 + 2175 sizeof(__be32) * 2 +
2160 sizeof(struct yfs_xdr_YFSFid) + 2176 sizeof(struct yfs_xdr_YFSFid) +
2161 sizeof(__be32) + size, 2177 sizeof(__be32) + size,
diff --git a/fs/block_dev.c b/fs/block_dev.c
index 4707dfff991b..677cb364d33f 100644
--- a/fs/block_dev.c
+++ b/fs/block_dev.c
@@ -1139,8 +1139,7 @@ static struct gendisk *bdev_get_gendisk(struct block_device *bdev, int *partno)
1139 * Pointer to the block device containing @bdev on success, ERR_PTR() 1139 * Pointer to the block device containing @bdev on success, ERR_PTR()
1140 * value on failure. 1140 * value on failure.
1141 */ 1141 */
1142static struct block_device *bd_start_claiming(struct block_device *bdev, 1142struct block_device *bd_start_claiming(struct block_device *bdev, void *holder)
1143 void *holder)
1144{ 1143{
1145 struct gendisk *disk; 1144 struct gendisk *disk;
1146 struct block_device *whole; 1145 struct block_device *whole;
@@ -1187,6 +1186,62 @@ static struct block_device *bd_start_claiming(struct block_device *bdev,
1187 return ERR_PTR(err); 1186 return ERR_PTR(err);
1188 } 1187 }
1189} 1188}
1189EXPORT_SYMBOL(bd_start_claiming);
1190
1191static void bd_clear_claiming(struct block_device *whole, void *holder)
1192{
1193 lockdep_assert_held(&bdev_lock);
1194 /* tell others that we're done */
1195 BUG_ON(whole->bd_claiming != holder);
1196 whole->bd_claiming = NULL;
1197 wake_up_bit(&whole->bd_claiming, 0);
1198}
1199
1200/**
1201 * bd_finish_claiming - finish claiming of a block device
1202 * @bdev: block device of interest
1203 * @whole: whole block device (returned from bd_start_claiming())
1204 * @holder: holder that has claimed @bdev
1205 *
1206 * Finish exclusive open of a block device. Mark the device as exlusively
1207 * open by the holder and wake up all waiters for exclusive open to finish.
1208 */
1209void bd_finish_claiming(struct block_device *bdev, struct block_device *whole,
1210 void *holder)
1211{
1212 spin_lock(&bdev_lock);
1213 BUG_ON(!bd_may_claim(bdev, whole, holder));
1214 /*
1215 * Note that for a whole device bd_holders will be incremented twice,
1216 * and bd_holder will be set to bd_may_claim before being set to holder
1217 */
1218 whole->bd_holders++;
1219 whole->bd_holder = bd_may_claim;
1220 bdev->bd_holders++;
1221 bdev->bd_holder = holder;
1222 bd_clear_claiming(whole, holder);
1223 spin_unlock(&bdev_lock);
1224}
1225EXPORT_SYMBOL(bd_finish_claiming);
1226
1227/**
1228 * bd_abort_claiming - abort claiming of a block device
1229 * @bdev: block device of interest
1230 * @whole: whole block device (returned from bd_start_claiming())
1231 * @holder: holder that has claimed @bdev
1232 *
1233 * Abort claiming of a block device when the exclusive open failed. This can be
1234 * also used when exclusive open is not actually desired and we just needed
1235 * to block other exclusive openers for a while.
1236 */
1237void bd_abort_claiming(struct block_device *bdev, struct block_device *whole,
1238 void *holder)
1239{
1240 spin_lock(&bdev_lock);
1241 bd_clear_claiming(whole, holder);
1242 spin_unlock(&bdev_lock);
1243}
1244EXPORT_SYMBOL(bd_abort_claiming);
1190 1245
1191#ifdef CONFIG_SYSFS 1246#ifdef CONFIG_SYSFS
1192struct bd_holder_disk { 1247struct bd_holder_disk {
@@ -1656,29 +1711,10 @@ int blkdev_get(struct block_device *bdev, fmode_t mode, void *holder)
1656 1711
1657 /* finish claiming */ 1712 /* finish claiming */
1658 mutex_lock(&bdev->bd_mutex); 1713 mutex_lock(&bdev->bd_mutex);
1659 spin_lock(&bdev_lock); 1714 if (!res)
1660 1715 bd_finish_claiming(bdev, whole, holder);
1661 if (!res) { 1716 else
1662 BUG_ON(!bd_may_claim(bdev, whole, holder)); 1717 bd_abort_claiming(bdev, whole, holder);
1663 /*
1664 * Note that for a whole device bd_holders
1665 * will be incremented twice, and bd_holder
1666 * will be set to bd_may_claim before being
1667 * set to holder
1668 */
1669 whole->bd_holders++;
1670 whole->bd_holder = bd_may_claim;
1671 bdev->bd_holders++;
1672 bdev->bd_holder = holder;
1673 }
1674
1675 /* tell others that we're done */
1676 BUG_ON(whole->bd_claiming != holder);
1677 whole->bd_claiming = NULL;
1678 wake_up_bit(&whole->bd_claiming, 0);
1679
1680 spin_unlock(&bdev_lock);
1681
1682 /* 1718 /*
1683 * Block event polling for write claims if requested. Any 1719 * Block event polling for write claims if requested. Any
1684 * write holder makes the write_holder state stick until 1720 * write holder makes the write_holder state stick until
diff --git a/fs/btrfs/Kconfig b/fs/btrfs/Kconfig
index 212b4a854f2c..38651fae7f21 100644
--- a/fs/btrfs/Kconfig
+++ b/fs/btrfs/Kconfig
@@ -4,6 +4,7 @@ config BTRFS_FS
4 tristate "Btrfs filesystem support" 4 tristate "Btrfs filesystem support"
5 select CRYPTO 5 select CRYPTO
6 select CRYPTO_CRC32C 6 select CRYPTO_CRC32C
7 select LIBCRC32C
7 select ZLIB_INFLATE 8 select ZLIB_INFLATE
8 select ZLIB_DEFLATE 9 select ZLIB_DEFLATE
9 select LZO_COMPRESS 10 select LZO_COMPRESS
diff --git a/fs/btrfs/backref.c b/fs/btrfs/backref.c
index 89116afda7a2..e5d85311d5d5 100644
--- a/fs/btrfs/backref.c
+++ b/fs/btrfs/backref.c
@@ -1483,7 +1483,7 @@ int btrfs_check_shared(struct btrfs_root *root, u64 inum, u64 bytenr,
1483 ulist_init(roots); 1483 ulist_init(roots);
1484 ulist_init(tmp); 1484 ulist_init(tmp);
1485 1485
1486 trans = btrfs_attach_transaction(root); 1486 trans = btrfs_join_transaction_nostart(root);
1487 if (IS_ERR(trans)) { 1487 if (IS_ERR(trans)) {
1488 if (PTR_ERR(trans) != -ENOENT && PTR_ERR(trans) != -EROFS) { 1488 if (PTR_ERR(trans) != -ENOENT && PTR_ERR(trans) != -EROFS) {
1489 ret = PTR_ERR(trans); 1489 ret = PTR_ERR(trans);
diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
index 299e11e6c554..94660063a162 100644
--- a/fs/btrfs/ctree.h
+++ b/fs/btrfs/ctree.h
@@ -401,7 +401,6 @@ struct btrfs_dev_replace {
401struct raid_kobject { 401struct raid_kobject {
402 u64 flags; 402 u64 flags;
403 struct kobject kobj; 403 struct kobject kobj;
404 struct list_head list;
405}; 404};
406 405
407/* 406/*
@@ -915,8 +914,6 @@ struct btrfs_fs_info {
915 u32 thread_pool_size; 914 u32 thread_pool_size;
916 915
917 struct kobject *space_info_kobj; 916 struct kobject *space_info_kobj;
918 struct list_head pending_raid_kobjs;
919 spinlock_t pending_raid_kobjs_lock; /* uncontended */
920 917
921 u64 total_pinned; 918 u64 total_pinned;
922 919
@@ -2698,7 +2695,6 @@ int btrfs_can_relocate(struct btrfs_fs_info *fs_info, u64 bytenr);
2698int btrfs_make_block_group(struct btrfs_trans_handle *trans, 2695int btrfs_make_block_group(struct btrfs_trans_handle *trans,
2699 u64 bytes_used, u64 type, u64 chunk_offset, 2696 u64 bytes_used, u64 type, u64 chunk_offset,
2700 u64 size); 2697 u64 size);
2701void btrfs_add_raid_kobjects(struct btrfs_fs_info *fs_info);
2702struct btrfs_trans_handle *btrfs_start_trans_remove_block_group( 2698struct btrfs_trans_handle *btrfs_start_trans_remove_block_group(
2703 struct btrfs_fs_info *fs_info, 2699 struct btrfs_fs_info *fs_info,
2704 const u64 chunk_offset); 2700 const u64 chunk_offset);
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index 41a2bd2e0c56..97beb351a10c 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -2683,8 +2683,6 @@ int open_ctree(struct super_block *sb,
2683 INIT_LIST_HEAD(&fs_info->delayed_iputs); 2683 INIT_LIST_HEAD(&fs_info->delayed_iputs);
2684 INIT_LIST_HEAD(&fs_info->delalloc_roots); 2684 INIT_LIST_HEAD(&fs_info->delalloc_roots);
2685 INIT_LIST_HEAD(&fs_info->caching_block_groups); 2685 INIT_LIST_HEAD(&fs_info->caching_block_groups);
2686 INIT_LIST_HEAD(&fs_info->pending_raid_kobjs);
2687 spin_lock_init(&fs_info->pending_raid_kobjs_lock);
2688 spin_lock_init(&fs_info->delalloc_root_lock); 2686 spin_lock_init(&fs_info->delalloc_root_lock);
2689 spin_lock_init(&fs_info->trans_lock); 2687 spin_lock_init(&fs_info->trans_lock);
2690 spin_lock_init(&fs_info->fs_roots_radix_lock); 2688 spin_lock_init(&fs_info->fs_roots_radix_lock);
@@ -4106,6 +4104,7 @@ void close_ctree(struct btrfs_fs_info *fs_info)
4106 percpu_counter_destroy(&fs_info->dev_replace.bio_counter); 4104 percpu_counter_destroy(&fs_info->dev_replace.bio_counter);
4107 cleanup_srcu_struct(&fs_info->subvol_srcu); 4105 cleanup_srcu_struct(&fs_info->subvol_srcu);
4108 4106
4107 btrfs_free_csum_hash(fs_info);
4109 btrfs_free_stripe_hash_table(fs_info); 4108 btrfs_free_stripe_hash_table(fs_info);
4110 btrfs_free_ref_cache(fs_info); 4109 btrfs_free_ref_cache(fs_info);
4111} 4110}
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index d3b58e388535..8b7eb22d508a 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -4,6 +4,7 @@
4 */ 4 */
5 5
6#include <linux/sched.h> 6#include <linux/sched.h>
7#include <linux/sched/mm.h>
7#include <linux/sched/signal.h> 8#include <linux/sched/signal.h>
8#include <linux/pagemap.h> 9#include <linux/pagemap.h>
9#include <linux/writeback.h> 10#include <linux/writeback.h>
@@ -7888,33 +7889,6 @@ int btrfs_free_block_groups(struct btrfs_fs_info *info)
7888 return 0; 7889 return 0;
7889} 7890}
7890 7891
7891/* link_block_group will queue up kobjects to add when we're reclaim-safe */
7892void btrfs_add_raid_kobjects(struct btrfs_fs_info *fs_info)
7893{
7894 struct btrfs_space_info *space_info;
7895 struct raid_kobject *rkobj;
7896 LIST_HEAD(list);
7897 int ret = 0;
7898
7899 spin_lock(&fs_info->pending_raid_kobjs_lock);
7900 list_splice_init(&fs_info->pending_raid_kobjs, &list);
7901 spin_unlock(&fs_info->pending_raid_kobjs_lock);
7902
7903 list_for_each_entry(rkobj, &list, list) {
7904 space_info = btrfs_find_space_info(fs_info, rkobj->flags);
7905
7906 ret = kobject_add(&rkobj->kobj, &space_info->kobj,
7907 "%s", btrfs_bg_type_to_raid_name(rkobj->flags));
7908 if (ret) {
7909 kobject_put(&rkobj->kobj);
7910 break;
7911 }
7912 }
7913 if (ret)
7914 btrfs_warn(fs_info,
7915 "failed to add kobject for block cache, ignoring");
7916}
7917
7918static void link_block_group(struct btrfs_block_group_cache *cache) 7892static void link_block_group(struct btrfs_block_group_cache *cache)
7919{ 7893{
7920 struct btrfs_space_info *space_info = cache->space_info; 7894 struct btrfs_space_info *space_info = cache->space_info;
@@ -7929,18 +7903,36 @@ static void link_block_group(struct btrfs_block_group_cache *cache)
7929 up_write(&space_info->groups_sem); 7903 up_write(&space_info->groups_sem);
7930 7904
7931 if (first) { 7905 if (first) {
7932 struct raid_kobject *rkobj = kzalloc(sizeof(*rkobj), GFP_NOFS); 7906 struct raid_kobject *rkobj;
7907 unsigned int nofs_flag;
7908 int ret;
7909
7910 /*
7911 * Setup a NOFS context because kobject_add(), deep in its call
7912 * chain, does GFP_KERNEL allocations, and we are often called
7913 * in a context where if reclaim is triggered we can deadlock
7914 * (we are either holding a transaction handle or some lock
7915 * required for a transaction commit).
7916 */
7917 nofs_flag = memalloc_nofs_save();
7918 rkobj = kzalloc(sizeof(*rkobj), GFP_KERNEL);
7933 if (!rkobj) { 7919 if (!rkobj) {
7920 memalloc_nofs_restore(nofs_flag);
7934 btrfs_warn(cache->fs_info, 7921 btrfs_warn(cache->fs_info,
7935 "couldn't alloc memory for raid level kobject"); 7922 "couldn't alloc memory for raid level kobject");
7936 return; 7923 return;
7937 } 7924 }
7938 rkobj->flags = cache->flags; 7925 rkobj->flags = cache->flags;
7939 kobject_init(&rkobj->kobj, &btrfs_raid_ktype); 7926 kobject_init(&rkobj->kobj, &btrfs_raid_ktype);
7940 7927 ret = kobject_add(&rkobj->kobj, &space_info->kobj, "%s",
7941 spin_lock(&fs_info->pending_raid_kobjs_lock); 7928 btrfs_bg_type_to_raid_name(rkobj->flags));
7942 list_add_tail(&rkobj->list, &fs_info->pending_raid_kobjs); 7929 memalloc_nofs_restore(nofs_flag);
7943 spin_unlock(&fs_info->pending_raid_kobjs_lock); 7930 if (ret) {
7931 kobject_put(&rkobj->kobj);
7932 btrfs_warn(fs_info,
7933 "failed to add kobject for block cache, ignoring");
7934 return;
7935 }
7944 space_info->block_group_kobjs[index] = &rkobj->kobj; 7936 space_info->block_group_kobjs[index] = &rkobj->kobj;
7945 } 7937 }
7946} 7938}
@@ -8206,7 +8198,6 @@ int btrfs_read_block_groups(struct btrfs_fs_info *info)
8206 inc_block_group_ro(cache, 1); 8198 inc_block_group_ro(cache, 1);
8207 } 8199 }
8208 8200
8209 btrfs_add_raid_kobjects(info);
8210 btrfs_init_global_block_rsv(info); 8201 btrfs_init_global_block_rsv(info);
8211 ret = check_chunk_block_group_mappings(info); 8202 ret = check_chunk_block_group_mappings(info);
8212error: 8203error:
@@ -8975,6 +8966,7 @@ int btrfs_trim_fs(struct btrfs_fs_info *fs_info, struct fstrim_range *range)
8975 struct btrfs_device *device; 8966 struct btrfs_device *device;
8976 struct list_head *devices; 8967 struct list_head *devices;
8977 u64 group_trimmed; 8968 u64 group_trimmed;
8969 u64 range_end = U64_MAX;
8978 u64 start; 8970 u64 start;
8979 u64 end; 8971 u64 end;
8980 u64 trimmed = 0; 8972 u64 trimmed = 0;
@@ -8984,16 +8976,23 @@ int btrfs_trim_fs(struct btrfs_fs_info *fs_info, struct fstrim_range *range)
8984 int dev_ret = 0; 8976 int dev_ret = 0;
8985 int ret = 0; 8977 int ret = 0;
8986 8978
8979 /*
8980 * Check range overflow if range->len is set.
8981 * The default range->len is U64_MAX.
8982 */
8983 if (range->len != U64_MAX &&
8984 check_add_overflow(range->start, range->len, &range_end))
8985 return -EINVAL;
8986
8987 cache = btrfs_lookup_first_block_group(fs_info, range->start); 8987 cache = btrfs_lookup_first_block_group(fs_info, range->start);
8988 for (; cache; cache = next_block_group(cache)) { 8988 for (; cache; cache = next_block_group(cache)) {
8989 if (cache->key.objectid >= (range->start + range->len)) { 8989 if (cache->key.objectid >= range_end) {
8990 btrfs_put_block_group(cache); 8990 btrfs_put_block_group(cache);
8991 break; 8991 break;
8992 } 8992 }
8993 8993
8994 start = max(range->start, cache->key.objectid); 8994 start = max(range->start, cache->key.objectid);
8995 end = min(range->start + range->len, 8995 end = min(range_end, cache->key.objectid + cache->key.offset);
8996 cache->key.objectid + cache->key.offset);
8997 8996
8998 if (end - start >= range->minlen) { 8997 if (end - start >= range->minlen) {
8999 if (!block_group_cache_done(cache)) { 8998 if (!block_group_cache_done(cache)) {
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index 1af069a9a0c7..ee582a36653d 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -395,10 +395,31 @@ static noinline int add_async_extent(struct async_chunk *cow,
395 return 0; 395 return 0;
396} 396}
397 397
398/*
399 * Check if the inode has flags compatible with compression
400 */
401static inline bool inode_can_compress(struct inode *inode)
402{
403 if (BTRFS_I(inode)->flags & BTRFS_INODE_NODATACOW ||
404 BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM)
405 return false;
406 return true;
407}
408
409/*
410 * Check if the inode needs to be submitted to compression, based on mount
411 * options, defragmentation, properties or heuristics.
412 */
398static inline int inode_need_compress(struct inode *inode, u64 start, u64 end) 413static inline int inode_need_compress(struct inode *inode, u64 start, u64 end)
399{ 414{
400 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); 415 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
401 416
417 if (!inode_can_compress(inode)) {
418 WARN(IS_ENABLED(CONFIG_BTRFS_DEBUG),
419 KERN_ERR "BTRFS: unexpected compression for ino %llu\n",
420 btrfs_ino(BTRFS_I(inode)));
421 return 0;
422 }
402 /* force compress */ 423 /* force compress */
403 if (btrfs_test_opt(fs_info, FORCE_COMPRESS)) 424 if (btrfs_test_opt(fs_info, FORCE_COMPRESS))
404 return 1; 425 return 1;
@@ -1631,7 +1652,8 @@ int btrfs_run_delalloc_range(struct inode *inode, struct page *locked_page,
1631 } else if (BTRFS_I(inode)->flags & BTRFS_INODE_PREALLOC && !force_cow) { 1652 } else if (BTRFS_I(inode)->flags & BTRFS_INODE_PREALLOC && !force_cow) {
1632 ret = run_delalloc_nocow(inode, locked_page, start, end, 1653 ret = run_delalloc_nocow(inode, locked_page, start, end,
1633 page_started, 0, nr_written); 1654 page_started, 0, nr_written);
1634 } else if (!inode_need_compress(inode, start, end)) { 1655 } else if (!inode_can_compress(inode) ||
1656 !inode_need_compress(inode, start, end)) {
1635 ret = cow_file_range(inode, locked_page, start, end, end, 1657 ret = cow_file_range(inode, locked_page, start, end, end,
1636 page_started, nr_written, 1, NULL); 1658 page_started, nr_written, 1, NULL);
1637 } else { 1659 } else {
diff --git a/fs/btrfs/locking.c b/fs/btrfs/locking.c
index 98fccce4208c..393eceda57c8 100644
--- a/fs/btrfs/locking.c
+++ b/fs/btrfs/locking.c
@@ -346,9 +346,12 @@ void btrfs_tree_unlock(struct extent_buffer *eb)
346 if (blockers) { 346 if (blockers) {
347 btrfs_assert_no_spinning_writers(eb); 347 btrfs_assert_no_spinning_writers(eb);
348 eb->blocking_writers--; 348 eb->blocking_writers--;
349 /* Use the lighter barrier after atomic */ 349 /*
350 smp_mb__after_atomic(); 350 * We need to order modifying blocking_writers above with
351 cond_wake_up_nomb(&eb->write_lock_wq); 351 * actually waking up the sleepers to ensure they see the
352 * updated value of blocking_writers
353 */
354 cond_wake_up(&eb->write_lock_wq);
352 } else { 355 } else {
353 btrfs_assert_spinning_writers_put(eb); 356 btrfs_assert_spinning_writers_put(eb);
354 write_unlock(&eb->lock); 357 write_unlock(&eb->lock);
diff --git a/fs/btrfs/ordered-data.c b/fs/btrfs/ordered-data.c
index 1744ba8b2754..ae7f64a8facb 100644
--- a/fs/btrfs/ordered-data.c
+++ b/fs/btrfs/ordered-data.c
@@ -985,13 +985,14 @@ void btrfs_lock_and_flush_ordered_range(struct extent_io_tree *tree,
985 struct extent_state **cached_state) 985 struct extent_state **cached_state)
986{ 986{
987 struct btrfs_ordered_extent *ordered; 987 struct btrfs_ordered_extent *ordered;
988 struct extent_state *cachedp = NULL; 988 struct extent_state *cache = NULL;
989 struct extent_state **cachedp = &cache;
989 990
990 if (cached_state) 991 if (cached_state)
991 cachedp = *cached_state; 992 cachedp = cached_state;
992 993
993 while (1) { 994 while (1) {
994 lock_extent_bits(tree, start, end, &cachedp); 995 lock_extent_bits(tree, start, end, cachedp);
995 ordered = btrfs_lookup_ordered_range(inode, start, 996 ordered = btrfs_lookup_ordered_range(inode, start,
996 end - start + 1); 997 end - start + 1);
997 if (!ordered) { 998 if (!ordered) {
@@ -1001,10 +1002,10 @@ void btrfs_lock_and_flush_ordered_range(struct extent_io_tree *tree,
1001 * aren't exposing it outside of this function 1002 * aren't exposing it outside of this function
1002 */ 1003 */
1003 if (!cached_state) 1004 if (!cached_state)
1004 refcount_dec(&cachedp->refs); 1005 refcount_dec(&cache->refs);
1005 break; 1006 break;
1006 } 1007 }
1007 unlock_extent_cached(tree, start, end, &cachedp); 1008 unlock_extent_cached(tree, start, end, cachedp);
1008 btrfs_start_ordered_extent(&inode->vfs_inode, ordered, 1); 1009 btrfs_start_ordered_extent(&inode->vfs_inode, ordered, 1);
1009 btrfs_put_ordered_extent(ordered); 1010 btrfs_put_ordered_extent(ordered);
1010 } 1011 }
diff --git a/fs/btrfs/send.c b/fs/btrfs/send.c
index 69b59bf75882..c3c0c064c25d 100644
--- a/fs/btrfs/send.c
+++ b/fs/btrfs/send.c
@@ -6322,68 +6322,21 @@ static int changed_extent(struct send_ctx *sctx,
6322{ 6322{
6323 int ret = 0; 6323 int ret = 0;
6324 6324
6325 if (sctx->cur_ino != sctx->cmp_key->objectid) { 6325 /*
6326 6326 * We have found an extent item that changed without the inode item
6327 if (result == BTRFS_COMPARE_TREE_CHANGED) { 6327 * having changed. This can happen either after relocation (where the
6328 struct extent_buffer *leaf_l; 6328 * disk_bytenr of an extent item is replaced at
6329 struct extent_buffer *leaf_r; 6329 * relocation.c:replace_file_extents()) or after deduplication into a
6330 struct btrfs_file_extent_item *ei_l; 6330 * file in both the parent and send snapshots (where an extent item can
6331 struct btrfs_file_extent_item *ei_r; 6331 * get modified or replaced with a new one). Note that deduplication
6332 6332 * updates the inode item, but it only changes the iversion (sequence
6333 leaf_l = sctx->left_path->nodes[0]; 6333 * field in the inode item) of the inode, so if a file is deduplicated
6334 leaf_r = sctx->right_path->nodes[0]; 6334 * the same amount of times in both the parent and send snapshots, its
6335 ei_l = btrfs_item_ptr(leaf_l, 6335 * iversion becames the same in both snapshots, whence the inode item is
6336 sctx->left_path->slots[0], 6336 * the same on both snapshots.
6337 struct btrfs_file_extent_item); 6337 */
6338 ei_r = btrfs_item_ptr(leaf_r, 6338 if (sctx->cur_ino != sctx->cmp_key->objectid)
6339 sctx->right_path->slots[0], 6339 return 0;
6340 struct btrfs_file_extent_item);
6341
6342 /*
6343 * We may have found an extent item that has changed
6344 * only its disk_bytenr field and the corresponding
6345 * inode item was not updated. This case happens due to
6346 * very specific timings during relocation when a leaf
6347 * that contains file extent items is COWed while
6348 * relocation is ongoing and its in the stage where it
6349 * updates data pointers. So when this happens we can
6350 * safely ignore it since we know it's the same extent,
6351 * but just at different logical and physical locations
6352 * (when an extent is fully replaced with a new one, we
6353 * know the generation number must have changed too,
6354 * since snapshot creation implies committing the current
6355 * transaction, and the inode item must have been updated
6356 * as well).
6357 * This replacement of the disk_bytenr happens at
6358 * relocation.c:replace_file_extents() through
6359 * relocation.c:btrfs_reloc_cow_block().
6360 */
6361 if (btrfs_file_extent_generation(leaf_l, ei_l) ==
6362 btrfs_file_extent_generation(leaf_r, ei_r) &&
6363 btrfs_file_extent_ram_bytes(leaf_l, ei_l) ==
6364 btrfs_file_extent_ram_bytes(leaf_r, ei_r) &&
6365 btrfs_file_extent_compression(leaf_l, ei_l) ==
6366 btrfs_file_extent_compression(leaf_r, ei_r) &&
6367 btrfs_file_extent_encryption(leaf_l, ei_l) ==
6368 btrfs_file_extent_encryption(leaf_r, ei_r) &&
6369 btrfs_file_extent_other_encoding(leaf_l, ei_l) ==
6370 btrfs_file_extent_other_encoding(leaf_r, ei_r) &&
6371 btrfs_file_extent_type(leaf_l, ei_l) ==
6372 btrfs_file_extent_type(leaf_r, ei_r) &&
6373 btrfs_file_extent_disk_bytenr(leaf_l, ei_l) !=
6374 btrfs_file_extent_disk_bytenr(leaf_r, ei_r) &&
6375 btrfs_file_extent_disk_num_bytes(leaf_l, ei_l) ==
6376 btrfs_file_extent_disk_num_bytes(leaf_r, ei_r) &&
6377 btrfs_file_extent_offset(leaf_l, ei_l) ==
6378 btrfs_file_extent_offset(leaf_r, ei_r) &&
6379 btrfs_file_extent_num_bytes(leaf_l, ei_l) ==
6380 btrfs_file_extent_num_bytes(leaf_r, ei_r))
6381 return 0;
6382 }
6383
6384 inconsistent_snapshot_error(sctx, result, "extent");
6385 return -EIO;
6386 }
6387 6340
6388 if (!sctx->cur_inode_new_gen && !sctx->cur_inode_deleted) { 6341 if (!sctx->cur_inode_new_gen && !sctx->cur_inode_deleted) {
6389 if (result != BTRFS_COMPARE_TREE_DELETED) 6342 if (result != BTRFS_COMPARE_TREE_DELETED)
diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c
index 3b8ae1a8f02d..e3adb714c04b 100644
--- a/fs/btrfs/transaction.c
+++ b/fs/btrfs/transaction.c
@@ -28,15 +28,18 @@ static const unsigned int btrfs_blocked_trans_types[TRANS_STATE_MAX] = {
28 [TRANS_STATE_COMMIT_START] = (__TRANS_START | __TRANS_ATTACH), 28 [TRANS_STATE_COMMIT_START] = (__TRANS_START | __TRANS_ATTACH),
29 [TRANS_STATE_COMMIT_DOING] = (__TRANS_START | 29 [TRANS_STATE_COMMIT_DOING] = (__TRANS_START |
30 __TRANS_ATTACH | 30 __TRANS_ATTACH |
31 __TRANS_JOIN), 31 __TRANS_JOIN |
32 __TRANS_JOIN_NOSTART),
32 [TRANS_STATE_UNBLOCKED] = (__TRANS_START | 33 [TRANS_STATE_UNBLOCKED] = (__TRANS_START |
33 __TRANS_ATTACH | 34 __TRANS_ATTACH |
34 __TRANS_JOIN | 35 __TRANS_JOIN |
35 __TRANS_JOIN_NOLOCK), 36 __TRANS_JOIN_NOLOCK |
37 __TRANS_JOIN_NOSTART),
36 [TRANS_STATE_COMPLETED] = (__TRANS_START | 38 [TRANS_STATE_COMPLETED] = (__TRANS_START |
37 __TRANS_ATTACH | 39 __TRANS_ATTACH |
38 __TRANS_JOIN | 40 __TRANS_JOIN |
39 __TRANS_JOIN_NOLOCK), 41 __TRANS_JOIN_NOLOCK |
42 __TRANS_JOIN_NOSTART),
40}; 43};
41 44
42void btrfs_put_transaction(struct btrfs_transaction *transaction) 45void btrfs_put_transaction(struct btrfs_transaction *transaction)
@@ -543,7 +546,8 @@ again:
543 ret = join_transaction(fs_info, type); 546 ret = join_transaction(fs_info, type);
544 if (ret == -EBUSY) { 547 if (ret == -EBUSY) {
545 wait_current_trans(fs_info); 548 wait_current_trans(fs_info);
546 if (unlikely(type == TRANS_ATTACH)) 549 if (unlikely(type == TRANS_ATTACH ||
550 type == TRANS_JOIN_NOSTART))
547 ret = -ENOENT; 551 ret = -ENOENT;
548 } 552 }
549 } while (ret == -EBUSY); 553 } while (ret == -EBUSY);
@@ -660,6 +664,16 @@ struct btrfs_trans_handle *btrfs_join_transaction_nolock(struct btrfs_root *root
660} 664}
661 665
662/* 666/*
667 * Similar to regular join but it never starts a transaction when none is
668 * running or after waiting for the current one to finish.
669 */
670struct btrfs_trans_handle *btrfs_join_transaction_nostart(struct btrfs_root *root)
671{
672 return start_transaction(root, 0, TRANS_JOIN_NOSTART,
673 BTRFS_RESERVE_NO_FLUSH, true);
674}
675
676/*
663 * btrfs_attach_transaction() - catch the running transaction 677 * btrfs_attach_transaction() - catch the running transaction
664 * 678 *
665 * It is used when we want to commit the current the transaction, but 679 * It is used when we want to commit the current the transaction, but
@@ -2037,6 +2051,16 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans)
2037 } 2051 }
2038 } else { 2052 } else {
2039 spin_unlock(&fs_info->trans_lock); 2053 spin_unlock(&fs_info->trans_lock);
2054 /*
2055 * The previous transaction was aborted and was already removed
2056 * from the list of transactions at fs_info->trans_list. So we
2057 * abort to prevent writing a new superblock that reflects a
2058 * corrupt state (pointing to trees with unwritten nodes/leafs).
2059 */
2060 if (test_bit(BTRFS_FS_STATE_TRANS_ABORTED, &fs_info->fs_state)) {
2061 ret = -EROFS;
2062 goto cleanup_transaction;
2063 }
2040 } 2064 }
2041 2065
2042 extwriter_counter_dec(cur_trans, trans->type); 2066 extwriter_counter_dec(cur_trans, trans->type);
diff --git a/fs/btrfs/transaction.h b/fs/btrfs/transaction.h
index 527ea94b57d9..2c5a6f6e5bb0 100644
--- a/fs/btrfs/transaction.h
+++ b/fs/btrfs/transaction.h
@@ -94,11 +94,13 @@ struct btrfs_transaction {
94#define __TRANS_JOIN (1U << 11) 94#define __TRANS_JOIN (1U << 11)
95#define __TRANS_JOIN_NOLOCK (1U << 12) 95#define __TRANS_JOIN_NOLOCK (1U << 12)
96#define __TRANS_DUMMY (1U << 13) 96#define __TRANS_DUMMY (1U << 13)
97#define __TRANS_JOIN_NOSTART (1U << 14)
97 98
98#define TRANS_START (__TRANS_START | __TRANS_FREEZABLE) 99#define TRANS_START (__TRANS_START | __TRANS_FREEZABLE)
99#define TRANS_ATTACH (__TRANS_ATTACH) 100#define TRANS_ATTACH (__TRANS_ATTACH)
100#define TRANS_JOIN (__TRANS_JOIN | __TRANS_FREEZABLE) 101#define TRANS_JOIN (__TRANS_JOIN | __TRANS_FREEZABLE)
101#define TRANS_JOIN_NOLOCK (__TRANS_JOIN_NOLOCK) 102#define TRANS_JOIN_NOLOCK (__TRANS_JOIN_NOLOCK)
103#define TRANS_JOIN_NOSTART (__TRANS_JOIN_NOSTART)
102 104
103#define TRANS_EXTWRITERS (__TRANS_START | __TRANS_ATTACH) 105#define TRANS_EXTWRITERS (__TRANS_START | __TRANS_ATTACH)
104 106
@@ -183,6 +185,7 @@ struct btrfs_trans_handle *btrfs_start_transaction_fallback_global_rsv(
183 int min_factor); 185 int min_factor);
184struct btrfs_trans_handle *btrfs_join_transaction(struct btrfs_root *root); 186struct btrfs_trans_handle *btrfs_join_transaction(struct btrfs_root *root);
185struct btrfs_trans_handle *btrfs_join_transaction_nolock(struct btrfs_root *root); 187struct btrfs_trans_handle *btrfs_join_transaction_nolock(struct btrfs_root *root);
188struct btrfs_trans_handle *btrfs_join_transaction_nostart(struct btrfs_root *root);
186struct btrfs_trans_handle *btrfs_attach_transaction(struct btrfs_root *root); 189struct btrfs_trans_handle *btrfs_attach_transaction(struct btrfs_root *root);
187struct btrfs_trans_handle *btrfs_attach_transaction_barrier( 190struct btrfs_trans_handle *btrfs_attach_transaction_barrier(
188 struct btrfs_root *root); 191 struct btrfs_root *root);
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
index a13ddba1ebc3..a447d3ec48d5 100644
--- a/fs/btrfs/volumes.c
+++ b/fs/btrfs/volumes.c
@@ -3087,16 +3087,6 @@ static int btrfs_relocate_chunk(struct btrfs_fs_info *fs_info, u64 chunk_offset)
3087 if (ret) 3087 if (ret)
3088 return ret; 3088 return ret;
3089 3089
3090 /*
3091 * We add the kobjects here (and after forcing data chunk creation)
3092 * since relocation is the only place we'll create chunks of a new
3093 * type at runtime. The only place where we'll remove the last
3094 * chunk of a type is the call immediately below this one. Even
3095 * so, we're protected against races with the cleaner thread since
3096 * we're covered by the delete_unused_bgs_mutex.
3097 */
3098 btrfs_add_raid_kobjects(fs_info);
3099
3100 trans = btrfs_start_trans_remove_block_group(root->fs_info, 3090 trans = btrfs_start_trans_remove_block_group(root->fs_info,
3101 chunk_offset); 3091 chunk_offset);
3102 if (IS_ERR(trans)) { 3092 if (IS_ERR(trans)) {
@@ -3223,9 +3213,6 @@ static int btrfs_may_alloc_data_chunk(struct btrfs_fs_info *fs_info,
3223 btrfs_end_transaction(trans); 3213 btrfs_end_transaction(trans);
3224 if (ret < 0) 3214 if (ret < 0)
3225 return ret; 3215 return ret;
3226
3227 btrfs_add_raid_kobjects(fs_info);
3228
3229 return 1; 3216 return 1;
3230 } 3217 }
3231 } 3218 }
@@ -5941,6 +5928,7 @@ int btrfs_get_io_geometry(struct btrfs_fs_info *fs_info, enum btrfs_map_op op,
5941 u64 stripe_len; 5928 u64 stripe_len;
5942 u64 raid56_full_stripe_start = (u64)-1; 5929 u64 raid56_full_stripe_start = (u64)-1;
5943 int data_stripes; 5930 int data_stripes;
5931 int ret = 0;
5944 5932
5945 ASSERT(op != BTRFS_MAP_DISCARD); 5933 ASSERT(op != BTRFS_MAP_DISCARD);
5946 5934
@@ -5961,8 +5949,8 @@ int btrfs_get_io_geometry(struct btrfs_fs_info *fs_info, enum btrfs_map_op op,
5961 btrfs_crit(fs_info, 5949 btrfs_crit(fs_info,
5962"stripe math has gone wrong, stripe_offset=%llu offset=%llu start=%llu logical=%llu stripe_len=%llu", 5950"stripe math has gone wrong, stripe_offset=%llu offset=%llu start=%llu logical=%llu stripe_len=%llu",
5963 stripe_offset, offset, em->start, logical, stripe_len); 5951 stripe_offset, offset, em->start, logical, stripe_len);
5964 free_extent_map(em); 5952 ret = -EINVAL;
5965 return -EINVAL; 5953 goto out;
5966 } 5954 }
5967 5955
5968 /* stripe_offset is the offset of this block in its stripe */ 5956 /* stripe_offset is the offset of this block in its stripe */
@@ -6009,7 +5997,10 @@ int btrfs_get_io_geometry(struct btrfs_fs_info *fs_info, enum btrfs_map_op op,
6009 io_geom->stripe_offset = stripe_offset; 5997 io_geom->stripe_offset = stripe_offset;
6010 io_geom->raid56_stripe_offset = raid56_full_stripe_start; 5998 io_geom->raid56_stripe_offset = raid56_full_stripe_start;
6011 5999
6012 return 0; 6000out:
6001 /* once for us */
6002 free_extent_map(em);
6003 return ret;
6013} 6004}
6014 6005
6015static int __btrfs_map_block(struct btrfs_fs_info *fs_info, 6006static int __btrfs_map_block(struct btrfs_fs_info *fs_info,
diff --git a/fs/ceph/addr.c b/fs/ceph/addr.c
index e078cc55b989..b3c8b886bf64 100644
--- a/fs/ceph/addr.c
+++ b/fs/ceph/addr.c
@@ -913,8 +913,9 @@ get_more_pages:
913 if (page_offset(page) >= ceph_wbc.i_size) { 913 if (page_offset(page) >= ceph_wbc.i_size) {
914 dout("%p page eof %llu\n", 914 dout("%p page eof %llu\n",
915 page, ceph_wbc.i_size); 915 page, ceph_wbc.i_size);
916 if (ceph_wbc.size_stable || 916 if ((ceph_wbc.size_stable ||
917 page_offset(page) >= i_size_read(inode)) 917 page_offset(page) >= i_size_read(inode)) &&
918 clear_page_dirty_for_io(page))
918 mapping->a_ops->invalidatepage(page, 919 mapping->a_ops->invalidatepage(page,
919 0, PAGE_SIZE); 920 0, PAGE_SIZE);
920 unlock_page(page); 921 unlock_page(page);
diff --git a/fs/ceph/caps.c b/fs/ceph/caps.c
index d98dcd976c80..ce0f5658720a 100644
--- a/fs/ceph/caps.c
+++ b/fs/ceph/caps.c
@@ -1301,6 +1301,7 @@ static int __send_cap(struct ceph_mds_client *mdsc, struct ceph_cap *cap,
1301{ 1301{
1302 struct ceph_inode_info *ci = cap->ci; 1302 struct ceph_inode_info *ci = cap->ci;
1303 struct inode *inode = &ci->vfs_inode; 1303 struct inode *inode = &ci->vfs_inode;
1304 struct ceph_buffer *old_blob = NULL;
1304 struct cap_msg_args arg; 1305 struct cap_msg_args arg;
1305 int held, revoking; 1306 int held, revoking;
1306 int wake = 0; 1307 int wake = 0;
@@ -1365,7 +1366,7 @@ static int __send_cap(struct ceph_mds_client *mdsc, struct ceph_cap *cap,
1365 ci->i_requested_max_size = arg.max_size; 1366 ci->i_requested_max_size = arg.max_size;
1366 1367
1367 if (flushing & CEPH_CAP_XATTR_EXCL) { 1368 if (flushing & CEPH_CAP_XATTR_EXCL) {
1368 __ceph_build_xattrs_blob(ci); 1369 old_blob = __ceph_build_xattrs_blob(ci);
1369 arg.xattr_version = ci->i_xattrs.version; 1370 arg.xattr_version = ci->i_xattrs.version;
1370 arg.xattr_buf = ci->i_xattrs.blob; 1371 arg.xattr_buf = ci->i_xattrs.blob;
1371 } else { 1372 } else {
@@ -1409,6 +1410,8 @@ static int __send_cap(struct ceph_mds_client *mdsc, struct ceph_cap *cap,
1409 1410
1410 spin_unlock(&ci->i_ceph_lock); 1411 spin_unlock(&ci->i_ceph_lock);
1411 1412
1413 ceph_buffer_put(old_blob);
1414
1412 ret = send_cap_msg(&arg); 1415 ret = send_cap_msg(&arg);
1413 if (ret < 0) { 1416 if (ret < 0) {
1414 dout("error sending cap msg, must requeue %p\n", inode); 1417 dout("error sending cap msg, must requeue %p\n", inode);
diff --git a/fs/ceph/inode.c b/fs/ceph/inode.c
index 791f84a13bb8..18500edefc56 100644
--- a/fs/ceph/inode.c
+++ b/fs/ceph/inode.c
@@ -736,6 +736,7 @@ static int fill_inode(struct inode *inode, struct page *locked_page,
736 int issued, new_issued, info_caps; 736 int issued, new_issued, info_caps;
737 struct timespec64 mtime, atime, ctime; 737 struct timespec64 mtime, atime, ctime;
738 struct ceph_buffer *xattr_blob = NULL; 738 struct ceph_buffer *xattr_blob = NULL;
739 struct ceph_buffer *old_blob = NULL;
739 struct ceph_string *pool_ns = NULL; 740 struct ceph_string *pool_ns = NULL;
740 struct ceph_cap *new_cap = NULL; 741 struct ceph_cap *new_cap = NULL;
741 int err = 0; 742 int err = 0;
@@ -881,7 +882,7 @@ static int fill_inode(struct inode *inode, struct page *locked_page,
881 if ((ci->i_xattrs.version == 0 || !(issued & CEPH_CAP_XATTR_EXCL)) && 882 if ((ci->i_xattrs.version == 0 || !(issued & CEPH_CAP_XATTR_EXCL)) &&
882 le64_to_cpu(info->xattr_version) > ci->i_xattrs.version) { 883 le64_to_cpu(info->xattr_version) > ci->i_xattrs.version) {
883 if (ci->i_xattrs.blob) 884 if (ci->i_xattrs.blob)
884 ceph_buffer_put(ci->i_xattrs.blob); 885 old_blob = ci->i_xattrs.blob;
885 ci->i_xattrs.blob = xattr_blob; 886 ci->i_xattrs.blob = xattr_blob;
886 if (xattr_blob) 887 if (xattr_blob)
887 memcpy(ci->i_xattrs.blob->vec.iov_base, 888 memcpy(ci->i_xattrs.blob->vec.iov_base,
@@ -1022,8 +1023,8 @@ static int fill_inode(struct inode *inode, struct page *locked_page,
1022out: 1023out:
1023 if (new_cap) 1024 if (new_cap)
1024 ceph_put_cap(mdsc, new_cap); 1025 ceph_put_cap(mdsc, new_cap);
1025 if (xattr_blob) 1026 ceph_buffer_put(old_blob);
1026 ceph_buffer_put(xattr_blob); 1027 ceph_buffer_put(xattr_blob);
1027 ceph_put_string(pool_ns); 1028 ceph_put_string(pool_ns);
1028 return err; 1029 return err;
1029} 1030}
diff --git a/fs/ceph/locks.c b/fs/ceph/locks.c
index ac9b53b89365..5083e238ad15 100644
--- a/fs/ceph/locks.c
+++ b/fs/ceph/locks.c
@@ -111,8 +111,7 @@ static int ceph_lock_message(u8 lock_type, u16 operation, struct inode *inode,
111 req->r_wait_for_completion = ceph_lock_wait_for_completion; 111 req->r_wait_for_completion = ceph_lock_wait_for_completion;
112 112
113 err = ceph_mdsc_do_request(mdsc, inode, req); 113 err = ceph_mdsc_do_request(mdsc, inode, req);
114 114 if (!err && operation == CEPH_MDS_OP_GETFILELOCK) {
115 if (operation == CEPH_MDS_OP_GETFILELOCK) {
116 fl->fl_pid = -le64_to_cpu(req->r_reply_info.filelock_reply->pid); 115 fl->fl_pid = -le64_to_cpu(req->r_reply_info.filelock_reply->pid);
117 if (CEPH_LOCK_SHARED == req->r_reply_info.filelock_reply->type) 116 if (CEPH_LOCK_SHARED == req->r_reply_info.filelock_reply->type)
118 fl->fl_type = F_RDLCK; 117 fl->fl_type = F_RDLCK;
diff --git a/fs/ceph/snap.c b/fs/ceph/snap.c
index 4c6494eb02b5..ccfcc66aaf44 100644
--- a/fs/ceph/snap.c
+++ b/fs/ceph/snap.c
@@ -465,6 +465,7 @@ void ceph_queue_cap_snap(struct ceph_inode_info *ci)
465 struct inode *inode = &ci->vfs_inode; 465 struct inode *inode = &ci->vfs_inode;
466 struct ceph_cap_snap *capsnap; 466 struct ceph_cap_snap *capsnap;
467 struct ceph_snap_context *old_snapc, *new_snapc; 467 struct ceph_snap_context *old_snapc, *new_snapc;
468 struct ceph_buffer *old_blob = NULL;
468 int used, dirty; 469 int used, dirty;
469 470
470 capsnap = kzalloc(sizeof(*capsnap), GFP_NOFS); 471 capsnap = kzalloc(sizeof(*capsnap), GFP_NOFS);
@@ -541,7 +542,7 @@ void ceph_queue_cap_snap(struct ceph_inode_info *ci)
541 capsnap->gid = inode->i_gid; 542 capsnap->gid = inode->i_gid;
542 543
543 if (dirty & CEPH_CAP_XATTR_EXCL) { 544 if (dirty & CEPH_CAP_XATTR_EXCL) {
544 __ceph_build_xattrs_blob(ci); 545 old_blob = __ceph_build_xattrs_blob(ci);
545 capsnap->xattr_blob = 546 capsnap->xattr_blob =
546 ceph_buffer_get(ci->i_xattrs.blob); 547 ceph_buffer_get(ci->i_xattrs.blob);
547 capsnap->xattr_version = ci->i_xattrs.version; 548 capsnap->xattr_version = ci->i_xattrs.version;
@@ -584,6 +585,7 @@ update_snapc:
584 } 585 }
585 spin_unlock(&ci->i_ceph_lock); 586 spin_unlock(&ci->i_ceph_lock);
586 587
588 ceph_buffer_put(old_blob);
587 kfree(capsnap); 589 kfree(capsnap);
588 ceph_put_snap_context(old_snapc); 590 ceph_put_snap_context(old_snapc);
589} 591}
diff --git a/fs/ceph/super.h b/fs/ceph/super.h
index d2352fd95dbc..6b9f1ee7de85 100644
--- a/fs/ceph/super.h
+++ b/fs/ceph/super.h
@@ -926,7 +926,7 @@ extern int ceph_getattr(const struct path *path, struct kstat *stat,
926int __ceph_setxattr(struct inode *, const char *, const void *, size_t, int); 926int __ceph_setxattr(struct inode *, const char *, const void *, size_t, int);
927ssize_t __ceph_getxattr(struct inode *, const char *, void *, size_t); 927ssize_t __ceph_getxattr(struct inode *, const char *, void *, size_t);
928extern ssize_t ceph_listxattr(struct dentry *, char *, size_t); 928extern ssize_t ceph_listxattr(struct dentry *, char *, size_t);
929extern void __ceph_build_xattrs_blob(struct ceph_inode_info *ci); 929extern struct ceph_buffer *__ceph_build_xattrs_blob(struct ceph_inode_info *ci);
930extern void __ceph_destroy_xattrs(struct ceph_inode_info *ci); 930extern void __ceph_destroy_xattrs(struct ceph_inode_info *ci);
931extern const struct xattr_handler *ceph_xattr_handlers[]; 931extern const struct xattr_handler *ceph_xattr_handlers[];
932 932
diff --git a/fs/ceph/xattr.c b/fs/ceph/xattr.c
index 37b458a9af3a..939eab7aa219 100644
--- a/fs/ceph/xattr.c
+++ b/fs/ceph/xattr.c
@@ -754,12 +754,15 @@ static int __get_required_blob_size(struct ceph_inode_info *ci, int name_size,
754 754
755/* 755/*
756 * If there are dirty xattrs, reencode xattrs into the prealloc_blob 756 * If there are dirty xattrs, reencode xattrs into the prealloc_blob
757 * and swap into place. 757 * and swap into place. It returns the old i_xattrs.blob (or NULL) so
758 * that it can be freed by the caller as the i_ceph_lock is likely to be
759 * held.
758 */ 760 */
759void __ceph_build_xattrs_blob(struct ceph_inode_info *ci) 761struct ceph_buffer *__ceph_build_xattrs_blob(struct ceph_inode_info *ci)
760{ 762{
761 struct rb_node *p; 763 struct rb_node *p;
762 struct ceph_inode_xattr *xattr = NULL; 764 struct ceph_inode_xattr *xattr = NULL;
765 struct ceph_buffer *old_blob = NULL;
763 void *dest; 766 void *dest;
764 767
765 dout("__build_xattrs_blob %p\n", &ci->vfs_inode); 768 dout("__build_xattrs_blob %p\n", &ci->vfs_inode);
@@ -790,12 +793,14 @@ void __ceph_build_xattrs_blob(struct ceph_inode_info *ci)
790 dest - ci->i_xattrs.prealloc_blob->vec.iov_base; 793 dest - ci->i_xattrs.prealloc_blob->vec.iov_base;
791 794
792 if (ci->i_xattrs.blob) 795 if (ci->i_xattrs.blob)
793 ceph_buffer_put(ci->i_xattrs.blob); 796 old_blob = ci->i_xattrs.blob;
794 ci->i_xattrs.blob = ci->i_xattrs.prealloc_blob; 797 ci->i_xattrs.blob = ci->i_xattrs.prealloc_blob;
795 ci->i_xattrs.prealloc_blob = NULL; 798 ci->i_xattrs.prealloc_blob = NULL;
796 ci->i_xattrs.dirty = false; 799 ci->i_xattrs.dirty = false;
797 ci->i_xattrs.version++; 800 ci->i_xattrs.version++;
798 } 801 }
802
803 return old_blob;
799} 804}
800 805
801static inline int __get_request_mask(struct inode *in) { 806static inline int __get_request_mask(struct inode *in) {
@@ -1036,6 +1041,7 @@ int __ceph_setxattr(struct inode *inode, const char *name,
1036 struct ceph_inode_info *ci = ceph_inode(inode); 1041 struct ceph_inode_info *ci = ceph_inode(inode);
1037 struct ceph_mds_client *mdsc = ceph_sb_to_client(inode->i_sb)->mdsc; 1042 struct ceph_mds_client *mdsc = ceph_sb_to_client(inode->i_sb)->mdsc;
1038 struct ceph_cap_flush *prealloc_cf = NULL; 1043 struct ceph_cap_flush *prealloc_cf = NULL;
1044 struct ceph_buffer *old_blob = NULL;
1039 int issued; 1045 int issued;
1040 int err; 1046 int err;
1041 int dirty = 0; 1047 int dirty = 0;
@@ -1109,13 +1115,15 @@ retry:
1109 struct ceph_buffer *blob; 1115 struct ceph_buffer *blob;
1110 1116
1111 spin_unlock(&ci->i_ceph_lock); 1117 spin_unlock(&ci->i_ceph_lock);
1112 dout(" preaallocating new blob size=%d\n", required_blob_size); 1118 ceph_buffer_put(old_blob); /* Shouldn't be required */
1119 dout(" pre-allocating new blob size=%d\n", required_blob_size);
1113 blob = ceph_buffer_new(required_blob_size, GFP_NOFS); 1120 blob = ceph_buffer_new(required_blob_size, GFP_NOFS);
1114 if (!blob) 1121 if (!blob)
1115 goto do_sync_unlocked; 1122 goto do_sync_unlocked;
1116 spin_lock(&ci->i_ceph_lock); 1123 spin_lock(&ci->i_ceph_lock);
1124 /* prealloc_blob can't be released while holding i_ceph_lock */
1117 if (ci->i_xattrs.prealloc_blob) 1125 if (ci->i_xattrs.prealloc_blob)
1118 ceph_buffer_put(ci->i_xattrs.prealloc_blob); 1126 old_blob = ci->i_xattrs.prealloc_blob;
1119 ci->i_xattrs.prealloc_blob = blob; 1127 ci->i_xattrs.prealloc_blob = blob;
1120 goto retry; 1128 goto retry;
1121 } 1129 }
@@ -1131,6 +1139,7 @@ retry:
1131 } 1139 }
1132 1140
1133 spin_unlock(&ci->i_ceph_lock); 1141 spin_unlock(&ci->i_ceph_lock);
1142 ceph_buffer_put(old_blob);
1134 if (lock_snap_rwsem) 1143 if (lock_snap_rwsem)
1135 up_read(&mdsc->snap_rwsem); 1144 up_read(&mdsc->snap_rwsem);
1136 if (dirty) 1145 if (dirty)
diff --git a/fs/cifs/cifsfs.h b/fs/cifs/cifsfs.h
index 4b21a90015a9..99caf77df4a2 100644
--- a/fs/cifs/cifsfs.h
+++ b/fs/cifs/cifsfs.h
@@ -152,5 +152,5 @@ extern long cifs_ioctl(struct file *filep, unsigned int cmd, unsigned long arg);
152extern const struct export_operations cifs_export_ops; 152extern const struct export_operations cifs_export_ops;
153#endif /* CONFIG_CIFS_NFSD_EXPORT */ 153#endif /* CONFIG_CIFS_NFSD_EXPORT */
154 154
155#define CIFS_VERSION "2.21" 155#define CIFS_VERSION "2.22"
156#endif /* _CIFSFS_H */ 156#endif /* _CIFSFS_H */
diff --git a/fs/cifs/cifsproto.h b/fs/cifs/cifsproto.h
index e23234207fc2..592a6cea2b79 100644
--- a/fs/cifs/cifsproto.h
+++ b/fs/cifs/cifsproto.h
@@ -579,6 +579,7 @@ extern void rqst_page_get_length(struct smb_rqst *rqst, unsigned int page,
579 unsigned int *len, unsigned int *offset); 579 unsigned int *len, unsigned int *offset);
580 580
581void extract_unc_hostname(const char *unc, const char **h, size_t *len); 581void extract_unc_hostname(const char *unc, const char **h, size_t *len);
582int copy_path_name(char *dst, const char *src);
582 583
583#ifdef CONFIG_CIFS_DFS_UPCALL 584#ifdef CONFIG_CIFS_DFS_UPCALL
584static inline int get_dfs_path(const unsigned int xid, struct cifs_ses *ses, 585static inline int get_dfs_path(const unsigned int xid, struct cifs_ses *ses,
diff --git a/fs/cifs/cifssmb.c b/fs/cifs/cifssmb.c
index e2f95965065d..3907653e63c7 100644
--- a/fs/cifs/cifssmb.c
+++ b/fs/cifs/cifssmb.c
@@ -942,10 +942,8 @@ PsxDelete:
942 PATH_MAX, nls_codepage, remap); 942 PATH_MAX, nls_codepage, remap);
943 name_len++; /* trailing null */ 943 name_len++; /* trailing null */
944 name_len *= 2; 944 name_len *= 2;
945 } else { /* BB add path length overrun check */ 945 } else {
946 name_len = strnlen(fileName, PATH_MAX); 946 name_len = copy_path_name(pSMB->FileName, fileName);
947 name_len++; /* trailing null */
948 strncpy(pSMB->FileName, fileName, name_len);
949 } 947 }
950 948
951 params = 6 + name_len; 949 params = 6 + name_len;
@@ -1015,10 +1013,8 @@ DelFileRetry:
1015 remap); 1013 remap);
1016 name_len++; /* trailing null */ 1014 name_len++; /* trailing null */
1017 name_len *= 2; 1015 name_len *= 2;
1018 } else { /* BB improve check for buffer overruns BB */ 1016 } else {
1019 name_len = strnlen(name, PATH_MAX); 1017 name_len = copy_path_name(pSMB->fileName, name);
1020 name_len++; /* trailing null */
1021 strncpy(pSMB->fileName, name, name_len);
1022 } 1018 }
1023 pSMB->SearchAttributes = 1019 pSMB->SearchAttributes =
1024 cpu_to_le16(ATTR_READONLY | ATTR_HIDDEN | ATTR_SYSTEM); 1020 cpu_to_le16(ATTR_READONLY | ATTR_HIDDEN | ATTR_SYSTEM);
@@ -1062,10 +1058,8 @@ RmDirRetry:
1062 remap); 1058 remap);
1063 name_len++; /* trailing null */ 1059 name_len++; /* trailing null */
1064 name_len *= 2; 1060 name_len *= 2;
1065 } else { /* BB improve check for buffer overruns BB */ 1061 } else {
1066 name_len = strnlen(name, PATH_MAX); 1062 name_len = copy_path_name(pSMB->DirName, name);
1067 name_len++; /* trailing null */
1068 strncpy(pSMB->DirName, name, name_len);
1069 } 1063 }
1070 1064
1071 pSMB->BufferFormat = 0x04; 1065 pSMB->BufferFormat = 0x04;
@@ -1107,10 +1101,8 @@ MkDirRetry:
1107 remap); 1101 remap);
1108 name_len++; /* trailing null */ 1102 name_len++; /* trailing null */
1109 name_len *= 2; 1103 name_len *= 2;
1110 } else { /* BB improve check for buffer overruns BB */ 1104 } else {
1111 name_len = strnlen(name, PATH_MAX); 1105 name_len = copy_path_name(pSMB->DirName, name);
1112 name_len++; /* trailing null */
1113 strncpy(pSMB->DirName, name, name_len);
1114 } 1106 }
1115 1107
1116 pSMB->BufferFormat = 0x04; 1108 pSMB->BufferFormat = 0x04;
@@ -1157,10 +1149,8 @@ PsxCreat:
1157 PATH_MAX, nls_codepage, remap); 1149 PATH_MAX, nls_codepage, remap);
1158 name_len++; /* trailing null */ 1150 name_len++; /* trailing null */
1159 name_len *= 2; 1151 name_len *= 2;
1160 } else { /* BB improve the check for buffer overruns BB */ 1152 } else {
1161 name_len = strnlen(name, PATH_MAX); 1153 name_len = copy_path_name(pSMB->FileName, name);
1162 name_len++; /* trailing null */
1163 strncpy(pSMB->FileName, name, name_len);
1164 } 1154 }
1165 1155
1166 params = 6 + name_len; 1156 params = 6 + name_len;
@@ -1324,11 +1314,9 @@ OldOpenRetry:
1324 fileName, PATH_MAX, nls_codepage, remap); 1314 fileName, PATH_MAX, nls_codepage, remap);
1325 name_len++; /* trailing null */ 1315 name_len++; /* trailing null */
1326 name_len *= 2; 1316 name_len *= 2;
1327 } else { /* BB improve check for buffer overruns BB */ 1317 } else {
1328 count = 0; /* no pad */ 1318 count = 0; /* no pad */
1329 name_len = strnlen(fileName, PATH_MAX); 1319 name_len = copy_path_name(pSMB->fileName, fileName);
1330 name_len++; /* trailing null */
1331 strncpy(pSMB->fileName, fileName, name_len);
1332 } 1320 }
1333 if (*pOplock & REQ_OPLOCK) 1321 if (*pOplock & REQ_OPLOCK)
1334 pSMB->OpenFlags = cpu_to_le16(REQ_OPLOCK); 1322 pSMB->OpenFlags = cpu_to_le16(REQ_OPLOCK);
@@ -1442,11 +1430,8 @@ openRetry:
1442 /* BB improve check for buffer overruns BB */ 1430 /* BB improve check for buffer overruns BB */
1443 /* no pad */ 1431 /* no pad */
1444 count = 0; 1432 count = 0;
1445 name_len = strnlen(path, PATH_MAX); 1433 name_len = copy_path_name(req->fileName, path);
1446 /* trailing null */
1447 name_len++;
1448 req->NameLength = cpu_to_le16(name_len); 1434 req->NameLength = cpu_to_le16(name_len);
1449 strncpy(req->fileName, path, name_len);
1450 } 1435 }
1451 1436
1452 if (*oplock & REQ_OPLOCK) 1437 if (*oplock & REQ_OPLOCK)
@@ -2812,15 +2797,10 @@ renameRetry:
2812 remap); 2797 remap);
2813 name_len2 += 1 /* trailing null */ + 1 /* Signature word */ ; 2798 name_len2 += 1 /* trailing null */ + 1 /* Signature word */ ;
2814 name_len2 *= 2; /* convert to bytes */ 2799 name_len2 *= 2; /* convert to bytes */
2815 } else { /* BB improve the check for buffer overruns BB */ 2800 } else {
2816 name_len = strnlen(from_name, PATH_MAX); 2801 name_len = copy_path_name(pSMB->OldFileName, from_name);
2817 name_len++; /* trailing null */ 2802 name_len2 = copy_path_name(pSMB->OldFileName+name_len+1, to_name);
2818 strncpy(pSMB->OldFileName, from_name, name_len);
2819 name_len2 = strnlen(to_name, PATH_MAX);
2820 name_len2++; /* trailing null */
2821 pSMB->OldFileName[name_len] = 0x04; /* 2nd buffer format */ 2803 pSMB->OldFileName[name_len] = 0x04; /* 2nd buffer format */
2822 strncpy(&pSMB->OldFileName[name_len + 1], to_name, name_len2);
2823 name_len2++; /* trailing null */
2824 name_len2++; /* signature byte */ 2804 name_len2++; /* signature byte */
2825 } 2805 }
2826 2806
@@ -2962,15 +2942,10 @@ copyRetry:
2962 toName, PATH_MAX, nls_codepage, remap); 2942 toName, PATH_MAX, nls_codepage, remap);
2963 name_len2 += 1 /* trailing null */ + 1 /* Signature word */ ; 2943 name_len2 += 1 /* trailing null */ + 1 /* Signature word */ ;
2964 name_len2 *= 2; /* convert to bytes */ 2944 name_len2 *= 2; /* convert to bytes */
2965 } else { /* BB improve the check for buffer overruns BB */ 2945 } else {
2966 name_len = strnlen(fromName, PATH_MAX); 2946 name_len = copy_path_name(pSMB->OldFileName, fromName);
2967 name_len++; /* trailing null */
2968 strncpy(pSMB->OldFileName, fromName, name_len);
2969 name_len2 = strnlen(toName, PATH_MAX);
2970 name_len2++; /* trailing null */
2971 pSMB->OldFileName[name_len] = 0x04; /* 2nd buffer format */ 2947 pSMB->OldFileName[name_len] = 0x04; /* 2nd buffer format */
2972 strncpy(&pSMB->OldFileName[name_len + 1], toName, name_len2); 2948 name_len2 = copy_path_name(pSMB->OldFileName+name_len+1, toName);
2973 name_len2++; /* trailing null */
2974 name_len2++; /* signature byte */ 2949 name_len2++; /* signature byte */
2975 } 2950 }
2976 2951
@@ -3021,10 +2996,8 @@ createSymLinkRetry:
3021 name_len++; /* trailing null */ 2996 name_len++; /* trailing null */
3022 name_len *= 2; 2997 name_len *= 2;
3023 2998
3024 } else { /* BB improve the check for buffer overruns BB */ 2999 } else {
3025 name_len = strnlen(fromName, PATH_MAX); 3000 name_len = copy_path_name(pSMB->FileName, fromName);
3026 name_len++; /* trailing null */
3027 strncpy(pSMB->FileName, fromName, name_len);
3028 } 3001 }
3029 params = 6 + name_len; 3002 params = 6 + name_len;
3030 pSMB->MaxSetupCount = 0; 3003 pSMB->MaxSetupCount = 0;
@@ -3044,10 +3017,8 @@ createSymLinkRetry:
3044 PATH_MAX, nls_codepage, remap); 3017 PATH_MAX, nls_codepage, remap);
3045 name_len_target++; /* trailing null */ 3018 name_len_target++; /* trailing null */
3046 name_len_target *= 2; 3019 name_len_target *= 2;
3047 } else { /* BB improve the check for buffer overruns BB */ 3020 } else {
3048 name_len_target = strnlen(toName, PATH_MAX); 3021 name_len_target = copy_path_name(data_offset, toName);
3049 name_len_target++; /* trailing null */
3050 strncpy(data_offset, toName, name_len_target);
3051 } 3022 }
3052 3023
3053 pSMB->MaxParameterCount = cpu_to_le16(2); 3024 pSMB->MaxParameterCount = cpu_to_le16(2);
@@ -3109,10 +3080,8 @@ createHardLinkRetry:
3109 name_len++; /* trailing null */ 3080 name_len++; /* trailing null */
3110 name_len *= 2; 3081 name_len *= 2;
3111 3082
3112 } else { /* BB improve the check for buffer overruns BB */ 3083 } else {
3113 name_len = strnlen(toName, PATH_MAX); 3084 name_len = copy_path_name(pSMB->FileName, toName);
3114 name_len++; /* trailing null */
3115 strncpy(pSMB->FileName, toName, name_len);
3116 } 3085 }
3117 params = 6 + name_len; 3086 params = 6 + name_len;
3118 pSMB->MaxSetupCount = 0; 3087 pSMB->MaxSetupCount = 0;
@@ -3131,10 +3100,8 @@ createHardLinkRetry:
3131 PATH_MAX, nls_codepage, remap); 3100 PATH_MAX, nls_codepage, remap);
3132 name_len_target++; /* trailing null */ 3101 name_len_target++; /* trailing null */
3133 name_len_target *= 2; 3102 name_len_target *= 2;
3134 } else { /* BB improve the check for buffer overruns BB */ 3103 } else {
3135 name_len_target = strnlen(fromName, PATH_MAX); 3104 name_len_target = copy_path_name(data_offset, fromName);
3136 name_len_target++; /* trailing null */
3137 strncpy(data_offset, fromName, name_len_target);
3138 } 3105 }
3139 3106
3140 pSMB->MaxParameterCount = cpu_to_le16(2); 3107 pSMB->MaxParameterCount = cpu_to_le16(2);
@@ -3213,15 +3180,10 @@ winCreateHardLinkRetry:
3213 remap); 3180 remap);
3214 name_len2 += 1 /* trailing null */ + 1 /* Signature word */ ; 3181 name_len2 += 1 /* trailing null */ + 1 /* Signature word */ ;
3215 name_len2 *= 2; /* convert to bytes */ 3182 name_len2 *= 2; /* convert to bytes */
3216 } else { /* BB improve the check for buffer overruns BB */ 3183 } else {
3217 name_len = strnlen(from_name, PATH_MAX); 3184 name_len = copy_path_name(pSMB->OldFileName, from_name);
3218 name_len++; /* trailing null */
3219 strncpy(pSMB->OldFileName, from_name, name_len);
3220 name_len2 = strnlen(to_name, PATH_MAX);
3221 name_len2++; /* trailing null */
3222 pSMB->OldFileName[name_len] = 0x04; /* 2nd buffer format */ 3185 pSMB->OldFileName[name_len] = 0x04; /* 2nd buffer format */
3223 strncpy(&pSMB->OldFileName[name_len + 1], to_name, name_len2); 3186 name_len2 = copy_path_name(pSMB->OldFileName+name_len+1, to_name);
3224 name_len2++; /* trailing null */
3225 name_len2++; /* signature byte */ 3187 name_len2++; /* signature byte */
3226 } 3188 }
3227 3189
@@ -3271,10 +3233,8 @@ querySymLinkRetry:
3271 remap); 3233 remap);
3272 name_len++; /* trailing null */ 3234 name_len++; /* trailing null */
3273 name_len *= 2; 3235 name_len *= 2;
3274 } else { /* BB improve the check for buffer overruns BB */ 3236 } else {
3275 name_len = strnlen(searchName, PATH_MAX); 3237 name_len = copy_path_name(pSMB->FileName, searchName);
3276 name_len++; /* trailing null */
3277 strncpy(pSMB->FileName, searchName, name_len);
3278 } 3238 }
3279 3239
3280 params = 2 /* level */ + 4 /* rsrvd */ + name_len /* incl null */ ; 3240 params = 2 /* level */ + 4 /* rsrvd */ + name_len /* incl null */ ;
@@ -3691,10 +3651,8 @@ queryAclRetry:
3691 name_len *= 2; 3651 name_len *= 2;
3692 pSMB->FileName[name_len] = 0; 3652 pSMB->FileName[name_len] = 0;
3693 pSMB->FileName[name_len+1] = 0; 3653 pSMB->FileName[name_len+1] = 0;
3694 } else { /* BB improve the check for buffer overruns BB */ 3654 } else {
3695 name_len = strnlen(searchName, PATH_MAX); 3655 name_len = copy_path_name(pSMB->FileName, searchName);
3696 name_len++; /* trailing null */
3697 strncpy(pSMB->FileName, searchName, name_len);
3698 } 3656 }
3699 3657
3700 params = 2 /* level */ + 4 /* rsrvd */ + name_len /* incl null */ ; 3658 params = 2 /* level */ + 4 /* rsrvd */ + name_len /* incl null */ ;
@@ -3776,10 +3734,8 @@ setAclRetry:
3776 PATH_MAX, nls_codepage, remap); 3734 PATH_MAX, nls_codepage, remap);
3777 name_len++; /* trailing null */ 3735 name_len++; /* trailing null */
3778 name_len *= 2; 3736 name_len *= 2;
3779 } else { /* BB improve the check for buffer overruns BB */ 3737 } else {
3780 name_len = strnlen(fileName, PATH_MAX); 3738 name_len = copy_path_name(pSMB->FileName, fileName);
3781 name_len++; /* trailing null */
3782 strncpy(pSMB->FileName, fileName, name_len);
3783 } 3739 }
3784 params = 6 + name_len; 3740 params = 6 + name_len;
3785 pSMB->MaxParameterCount = cpu_to_le16(2); 3741 pSMB->MaxParameterCount = cpu_to_le16(2);
@@ -4184,9 +4140,7 @@ QInfRetry:
4184 name_len++; /* trailing null */ 4140 name_len++; /* trailing null */
4185 name_len *= 2; 4141 name_len *= 2;
4186 } else { 4142 } else {
4187 name_len = strnlen(search_name, PATH_MAX); 4143 name_len = copy_path_name(pSMB->FileName, search_name);
4188 name_len++; /* trailing null */
4189 strncpy(pSMB->FileName, search_name, name_len);
4190 } 4144 }
4191 pSMB->BufferFormat = 0x04; 4145 pSMB->BufferFormat = 0x04;
4192 name_len++; /* account for buffer type byte */ 4146 name_len++; /* account for buffer type byte */
@@ -4321,10 +4275,8 @@ QPathInfoRetry:
4321 PATH_MAX, nls_codepage, remap); 4275 PATH_MAX, nls_codepage, remap);
4322 name_len++; /* trailing null */ 4276 name_len++; /* trailing null */
4323 name_len *= 2; 4277 name_len *= 2;
4324 } else { /* BB improve the check for buffer overruns BB */ 4278 } else {
4325 name_len = strnlen(search_name, PATH_MAX); 4279 name_len = copy_path_name(pSMB->FileName, search_name);
4326 name_len++; /* trailing null */
4327 strncpy(pSMB->FileName, search_name, name_len);
4328 } 4280 }
4329 4281
4330 params = 2 /* level */ + 4 /* reserved */ + name_len /* includes NUL */; 4282 params = 2 /* level */ + 4 /* reserved */ + name_len /* includes NUL */;
@@ -4490,10 +4442,8 @@ UnixQPathInfoRetry:
4490 PATH_MAX, nls_codepage, remap); 4442 PATH_MAX, nls_codepage, remap);
4491 name_len++; /* trailing null */ 4443 name_len++; /* trailing null */
4492 name_len *= 2; 4444 name_len *= 2;
4493 } else { /* BB improve the check for buffer overruns BB */ 4445 } else {
4494 name_len = strnlen(searchName, PATH_MAX); 4446 name_len = copy_path_name(pSMB->FileName, searchName);
4495 name_len++; /* trailing null */
4496 strncpy(pSMB->FileName, searchName, name_len);
4497 } 4447 }
4498 4448
4499 params = 2 /* level */ + 4 /* reserved */ + name_len /* includes NUL */; 4449 params = 2 /* level */ + 4 /* reserved */ + name_len /* includes NUL */;
@@ -4593,17 +4543,16 @@ findFirstRetry:
4593 pSMB->FileName[name_len+1] = 0; 4543 pSMB->FileName[name_len+1] = 0;
4594 name_len += 2; 4544 name_len += 2;
4595 } 4545 }
4596 } else { /* BB add check for overrun of SMB buf BB */ 4546 } else {
4597 name_len = strnlen(searchName, PATH_MAX); 4547 name_len = copy_path_name(pSMB->FileName, searchName);
4598/* BB fix here and in unicode clause above ie
4599 if (name_len > buffersize-header)
4600 free buffer exit; BB */
4601 strncpy(pSMB->FileName, searchName, name_len);
4602 if (msearch) { 4548 if (msearch) {
4603 pSMB->FileName[name_len] = CIFS_DIR_SEP(cifs_sb); 4549 if (WARN_ON_ONCE(name_len > PATH_MAX-2))
4604 pSMB->FileName[name_len+1] = '*'; 4550 name_len = PATH_MAX-2;
4605 pSMB->FileName[name_len+2] = 0; 4551 /* overwrite nul byte */
4606 name_len += 3; 4552 pSMB->FileName[name_len-1] = CIFS_DIR_SEP(cifs_sb);
4553 pSMB->FileName[name_len] = '*';
4554 pSMB->FileName[name_len+1] = 0;
4555 name_len += 2;
4607 } 4556 }
4608 } 4557 }
4609 4558
@@ -4898,10 +4847,8 @@ GetInodeNumberRetry:
4898 remap); 4847 remap);
4899 name_len++; /* trailing null */ 4848 name_len++; /* trailing null */
4900 name_len *= 2; 4849 name_len *= 2;
4901 } else { /* BB improve the check for buffer overruns BB */ 4850 } else {
4902 name_len = strnlen(search_name, PATH_MAX); 4851 name_len = copy_path_name(pSMB->FileName, search_name);
4903 name_len++; /* trailing null */
4904 strncpy(pSMB->FileName, search_name, name_len);
4905 } 4852 }
4906 4853
4907 params = 2 /* level */ + 4 /* rsrvd */ + name_len /* incl null */ ; 4854 params = 2 /* level */ + 4 /* rsrvd */ + name_len /* incl null */ ;
@@ -5008,9 +4955,7 @@ getDFSRetry:
5008 name_len++; /* trailing null */ 4955 name_len++; /* trailing null */
5009 name_len *= 2; 4956 name_len *= 2;
5010 } else { /* BB improve the check for buffer overruns BB */ 4957 } else { /* BB improve the check for buffer overruns BB */
5011 name_len = strnlen(search_name, PATH_MAX); 4958 name_len = copy_path_name(pSMB->RequestFileName, search_name);
5012 name_len++; /* trailing null */
5013 strncpy(pSMB->RequestFileName, search_name, name_len);
5014 } 4959 }
5015 4960
5016 if (ses->server->sign) 4961 if (ses->server->sign)
@@ -5663,10 +5608,8 @@ SetEOFRetry:
5663 PATH_MAX, cifs_sb->local_nls, remap); 5608 PATH_MAX, cifs_sb->local_nls, remap);
5664 name_len++; /* trailing null */ 5609 name_len++; /* trailing null */
5665 name_len *= 2; 5610 name_len *= 2;
5666 } else { /* BB improve the check for buffer overruns BB */ 5611 } else {
5667 name_len = strnlen(file_name, PATH_MAX); 5612 name_len = copy_path_name(pSMB->FileName, file_name);
5668 name_len++; /* trailing null */
5669 strncpy(pSMB->FileName, file_name, name_len);
5670 } 5613 }
5671 params = 6 + name_len; 5614 params = 6 + name_len;
5672 data_count = sizeof(struct file_end_of_file_info); 5615 data_count = sizeof(struct file_end_of_file_info);
@@ -5959,10 +5902,8 @@ SetTimesRetry:
5959 PATH_MAX, nls_codepage, remap); 5902 PATH_MAX, nls_codepage, remap);
5960 name_len++; /* trailing null */ 5903 name_len++; /* trailing null */
5961 name_len *= 2; 5904 name_len *= 2;
5962 } else { /* BB improve the check for buffer overruns BB */ 5905 } else {
5963 name_len = strnlen(fileName, PATH_MAX); 5906 name_len = copy_path_name(pSMB->FileName, fileName);
5964 name_len++; /* trailing null */
5965 strncpy(pSMB->FileName, fileName, name_len);
5966 } 5907 }
5967 5908
5968 params = 6 + name_len; 5909 params = 6 + name_len;
@@ -6040,10 +5981,8 @@ SetAttrLgcyRetry:
6040 PATH_MAX, nls_codepage); 5981 PATH_MAX, nls_codepage);
6041 name_len++; /* trailing null */ 5982 name_len++; /* trailing null */
6042 name_len *= 2; 5983 name_len *= 2;
6043 } else { /* BB improve the check for buffer overruns BB */ 5984 } else {
6044 name_len = strnlen(fileName, PATH_MAX); 5985 name_len = copy_path_name(pSMB->fileName, fileName);
6045 name_len++; /* trailing null */
6046 strncpy(pSMB->fileName, fileName, name_len);
6047 } 5986 }
6048 pSMB->attr = cpu_to_le16(dos_attrs); 5987 pSMB->attr = cpu_to_le16(dos_attrs);
6049 pSMB->BufferFormat = 0x04; 5988 pSMB->BufferFormat = 0x04;
@@ -6203,10 +6142,8 @@ setPermsRetry:
6203 PATH_MAX, nls_codepage, remap); 6142 PATH_MAX, nls_codepage, remap);
6204 name_len++; /* trailing null */ 6143 name_len++; /* trailing null */
6205 name_len *= 2; 6144 name_len *= 2;
6206 } else { /* BB improve the check for buffer overruns BB */ 6145 } else {
6207 name_len = strnlen(file_name, PATH_MAX); 6146 name_len = copy_path_name(pSMB->FileName, file_name);
6208 name_len++; /* trailing null */
6209 strncpy(pSMB->FileName, file_name, name_len);
6210 } 6147 }
6211 6148
6212 params = 6 + name_len; 6149 params = 6 + name_len;
@@ -6298,10 +6235,8 @@ QAllEAsRetry:
6298 PATH_MAX, nls_codepage, remap); 6235 PATH_MAX, nls_codepage, remap);
6299 list_len++; /* trailing null */ 6236 list_len++; /* trailing null */
6300 list_len *= 2; 6237 list_len *= 2;
6301 } else { /* BB improve the check for buffer overruns BB */ 6238 } else {
6302 list_len = strnlen(searchName, PATH_MAX); 6239 list_len = copy_path_name(pSMB->FileName, searchName);
6303 list_len++; /* trailing null */
6304 strncpy(pSMB->FileName, searchName, list_len);
6305 } 6240 }
6306 6241
6307 params = 2 /* level */ + 4 /* reserved */ + list_len /* includes NUL */; 6242 params = 2 /* level */ + 4 /* reserved */ + list_len /* includes NUL */;
@@ -6480,10 +6415,8 @@ SetEARetry:
6480 PATH_MAX, nls_codepage, remap); 6415 PATH_MAX, nls_codepage, remap);
6481 name_len++; /* trailing null */ 6416 name_len++; /* trailing null */
6482 name_len *= 2; 6417 name_len *= 2;
6483 } else { /* BB improve the check for buffer overruns BB */ 6418 } else {
6484 name_len = strnlen(fileName, PATH_MAX); 6419 name_len = copy_path_name(pSMB->FileName, fileName);
6485 name_len++; /* trailing null */
6486 strncpy(pSMB->FileName, fileName, name_len);
6487 } 6420 }
6488 6421
6489 params = 6 + name_len; 6422 params = 6 + name_len;
diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
index a4830ced0f98..5299effa6f7d 100644
--- a/fs/cifs/connect.c
+++ b/fs/cifs/connect.c
@@ -1113,6 +1113,7 @@ cifs_demultiplex_thread(void *p)
1113 mempool_resize(cifs_req_poolp, length + cifs_min_rcv); 1113 mempool_resize(cifs_req_poolp, length + cifs_min_rcv);
1114 1114
1115 set_freezable(); 1115 set_freezable();
1116 allow_kernel_signal(SIGKILL);
1116 while (server->tcpStatus != CifsExiting) { 1117 while (server->tcpStatus != CifsExiting) {
1117 if (try_to_freeze()) 1118 if (try_to_freeze())
1118 continue; 1119 continue;
@@ -2980,6 +2981,7 @@ static int
2980cifs_set_cifscreds(struct smb_vol *vol, struct cifs_ses *ses) 2981cifs_set_cifscreds(struct smb_vol *vol, struct cifs_ses *ses)
2981{ 2982{
2982 int rc = 0; 2983 int rc = 0;
2984 int is_domain = 0;
2983 const char *delim, *payload; 2985 const char *delim, *payload;
2984 char *desc; 2986 char *desc;
2985 ssize_t len; 2987 ssize_t len;
@@ -3027,6 +3029,7 @@ cifs_set_cifscreds(struct smb_vol *vol, struct cifs_ses *ses)
3027 rc = PTR_ERR(key); 3029 rc = PTR_ERR(key);
3028 goto out_err; 3030 goto out_err;
3029 } 3031 }
3032 is_domain = 1;
3030 } 3033 }
3031 3034
3032 down_read(&key->sem); 3035 down_read(&key->sem);
@@ -3084,6 +3087,26 @@ cifs_set_cifscreds(struct smb_vol *vol, struct cifs_ses *ses)
3084 goto out_key_put; 3087 goto out_key_put;
3085 } 3088 }
3086 3089
3090 /*
3091 * If we have a domain key then we must set the domainName in the
3092 * for the request.
3093 */
3094 if (is_domain && ses->domainName) {
3095 vol->domainname = kstrndup(ses->domainName,
3096 strlen(ses->domainName),
3097 GFP_KERNEL);
3098 if (!vol->domainname) {
3099 cifs_dbg(FYI, "Unable to allocate %zd bytes for "
3100 "domain\n", len);
3101 rc = -ENOMEM;
3102 kfree(vol->username);
3103 vol->username = NULL;
3104 kzfree(vol->password);
3105 vol->password = NULL;
3106 goto out_key_put;
3107 }
3108 }
3109
3087out_key_put: 3110out_key_put:
3088 up_read(&key->sem); 3111 up_read(&key->sem);
3089 key_put(key); 3112 key_put(key);
@@ -4208,16 +4231,19 @@ build_unc_path_to_root(const struct smb_vol *vol,
4208 strlen(vol->prepath) + 1 : 0; 4231 strlen(vol->prepath) + 1 : 0;
4209 unsigned int unc_len = strnlen(vol->UNC, MAX_TREE_SIZE + 1); 4232 unsigned int unc_len = strnlen(vol->UNC, MAX_TREE_SIZE + 1);
4210 4233
4234 if (unc_len > MAX_TREE_SIZE)
4235 return ERR_PTR(-EINVAL);
4236
4211 full_path = kmalloc(unc_len + pplen + 1, GFP_KERNEL); 4237 full_path = kmalloc(unc_len + pplen + 1, GFP_KERNEL);
4212 if (full_path == NULL) 4238 if (full_path == NULL)
4213 return ERR_PTR(-ENOMEM); 4239 return ERR_PTR(-ENOMEM);
4214 4240
4215 strncpy(full_path, vol->UNC, unc_len); 4241 memcpy(full_path, vol->UNC, unc_len);
4216 pos = full_path + unc_len; 4242 pos = full_path + unc_len;
4217 4243
4218 if (pplen) { 4244 if (pplen) {
4219 *pos = CIFS_DIR_SEP(cifs_sb); 4245 *pos = CIFS_DIR_SEP(cifs_sb);
4220 strncpy(pos + 1, vol->prepath, pplen); 4246 memcpy(pos + 1, vol->prepath, pplen);
4221 pos += pplen; 4247 pos += pplen;
4222 } 4248 }
4223 4249
diff --git a/fs/cifs/dir.c b/fs/cifs/dir.c
index f26a48dd2e39..be424e81e3ad 100644
--- a/fs/cifs/dir.c
+++ b/fs/cifs/dir.c
@@ -69,11 +69,10 @@ cifs_build_path_to_root(struct smb_vol *vol, struct cifs_sb_info *cifs_sb,
69 return full_path; 69 return full_path;
70 70
71 if (dfsplen) 71 if (dfsplen)
72 strncpy(full_path, tcon->treeName, dfsplen); 72 memcpy(full_path, tcon->treeName, dfsplen);
73 full_path[dfsplen] = CIFS_DIR_SEP(cifs_sb); 73 full_path[dfsplen] = CIFS_DIR_SEP(cifs_sb);
74 strncpy(full_path + dfsplen + 1, vol->prepath, pplen); 74 memcpy(full_path + dfsplen + 1, vol->prepath, pplen);
75 convert_delimiter(full_path, CIFS_DIR_SEP(cifs_sb)); 75 convert_delimiter(full_path, CIFS_DIR_SEP(cifs_sb));
76 full_path[dfsplen + pplen] = 0; /* add trailing null */
77 return full_path; 76 return full_path;
78} 77}
79 78
diff --git a/fs/cifs/misc.c b/fs/cifs/misc.c
index f383877a6511..5ad83bdb9bea 100644
--- a/fs/cifs/misc.c
+++ b/fs/cifs/misc.c
@@ -1011,3 +1011,25 @@ void extract_unc_hostname(const char *unc, const char **h, size_t *len)
1011 *h = unc; 1011 *h = unc;
1012 *len = end - unc; 1012 *len = end - unc;
1013} 1013}
1014
1015/**
1016 * copy_path_name - copy src path to dst, possibly truncating
1017 *
1018 * returns number of bytes written (including trailing nul)
1019 */
1020int copy_path_name(char *dst, const char *src)
1021{
1022 int name_len;
1023
1024 /*
1025 * PATH_MAX includes nul, so if strlen(src) >= PATH_MAX it
1026 * will truncate and strlen(dst) will be PATH_MAX-1
1027 */
1028 name_len = strscpy(dst, src, PATH_MAX);
1029 if (WARN_ON_ONCE(name_len < 0))
1030 name_len = PATH_MAX-1;
1031
1032 /* we count the trailing nul */
1033 name_len++;
1034 return name_len;
1035}
diff --git a/fs/cifs/sess.c b/fs/cifs/sess.c
index dcd49ad60c83..4c764ff7edd2 100644
--- a/fs/cifs/sess.c
+++ b/fs/cifs/sess.c
@@ -159,13 +159,16 @@ static void ascii_ssetup_strings(char **pbcc_area, struct cifs_ses *ses,
159 const struct nls_table *nls_cp) 159 const struct nls_table *nls_cp)
160{ 160{
161 char *bcc_ptr = *pbcc_area; 161 char *bcc_ptr = *pbcc_area;
162 int len;
162 163
163 /* copy user */ 164 /* copy user */
164 /* BB what about null user mounts - check that we do this BB */ 165 /* BB what about null user mounts - check that we do this BB */
165 /* copy user */ 166 /* copy user */
166 if (ses->user_name != NULL) { 167 if (ses->user_name != NULL) {
167 strncpy(bcc_ptr, ses->user_name, CIFS_MAX_USERNAME_LEN); 168 len = strscpy(bcc_ptr, ses->user_name, CIFS_MAX_USERNAME_LEN);
168 bcc_ptr += strnlen(ses->user_name, CIFS_MAX_USERNAME_LEN); 169 if (WARN_ON_ONCE(len < 0))
170 len = CIFS_MAX_USERNAME_LEN - 1;
171 bcc_ptr += len;
169 } 172 }
170 /* else null user mount */ 173 /* else null user mount */
171 *bcc_ptr = 0; 174 *bcc_ptr = 0;
@@ -173,8 +176,10 @@ static void ascii_ssetup_strings(char **pbcc_area, struct cifs_ses *ses,
173 176
174 /* copy domain */ 177 /* copy domain */
175 if (ses->domainName != NULL) { 178 if (ses->domainName != NULL) {
176 strncpy(bcc_ptr, ses->domainName, CIFS_MAX_DOMAINNAME_LEN); 179 len = strscpy(bcc_ptr, ses->domainName, CIFS_MAX_DOMAINNAME_LEN);
177 bcc_ptr += strnlen(ses->domainName, CIFS_MAX_DOMAINNAME_LEN); 180 if (WARN_ON_ONCE(len < 0))
181 len = CIFS_MAX_DOMAINNAME_LEN - 1;
182 bcc_ptr += len;
178 } /* else we will send a null domain name 183 } /* else we will send a null domain name
179 so the server will default to its own domain */ 184 so the server will default to its own domain */
180 *bcc_ptr = 0; 185 *bcc_ptr = 0;
@@ -242,9 +247,10 @@ static void decode_ascii_ssetup(char **pbcc_area, __u16 bleft,
242 247
243 kfree(ses->serverOS); 248 kfree(ses->serverOS);
244 249
245 ses->serverOS = kzalloc(len + 1, GFP_KERNEL); 250 ses->serverOS = kmalloc(len + 1, GFP_KERNEL);
246 if (ses->serverOS) { 251 if (ses->serverOS) {
247 strncpy(ses->serverOS, bcc_ptr, len); 252 memcpy(ses->serverOS, bcc_ptr, len);
253 ses->serverOS[len] = 0;
248 if (strncmp(ses->serverOS, "OS/2", 4) == 0) 254 if (strncmp(ses->serverOS, "OS/2", 4) == 0)
249 cifs_dbg(FYI, "OS/2 server\n"); 255 cifs_dbg(FYI, "OS/2 server\n");
250 } 256 }
@@ -258,9 +264,11 @@ static void decode_ascii_ssetup(char **pbcc_area, __u16 bleft,
258 264
259 kfree(ses->serverNOS); 265 kfree(ses->serverNOS);
260 266
261 ses->serverNOS = kzalloc(len + 1, GFP_KERNEL); 267 ses->serverNOS = kmalloc(len + 1, GFP_KERNEL);
262 if (ses->serverNOS) 268 if (ses->serverNOS) {
263 strncpy(ses->serverNOS, bcc_ptr, len); 269 memcpy(ses->serverNOS, bcc_ptr, len);
270 ses->serverNOS[len] = 0;
271 }
264 272
265 bcc_ptr += len + 1; 273 bcc_ptr += len + 1;
266 bleft -= len + 1; 274 bleft -= len + 1;
diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c
index a5bc1b671c12..64a5864127be 100644
--- a/fs/cifs/smb2ops.c
+++ b/fs/cifs/smb2ops.c
@@ -3489,7 +3489,15 @@ fill_transform_hdr(struct smb2_transform_hdr *tr_hdr, unsigned int orig_len,
3489static inline void smb2_sg_set_buf(struct scatterlist *sg, const void *buf, 3489static inline void smb2_sg_set_buf(struct scatterlist *sg, const void *buf,
3490 unsigned int buflen) 3490 unsigned int buflen)
3491{ 3491{
3492 sg_set_page(sg, virt_to_page(buf), buflen, offset_in_page(buf)); 3492 void *addr;
3493 /*
3494 * VMAP_STACK (at least) puts stack into the vmalloc address space
3495 */
3496 if (is_vmalloc_addr(buf))
3497 addr = vmalloc_to_page(buf);
3498 else
3499 addr = virt_to_page(buf);
3500 sg_set_page(sg, addr, buflen, offset_in_page(buf));
3493} 3501}
3494 3502
3495/* Assumes the first rqst has a transform header as the first iov. 3503/* Assumes the first rqst has a transform header as the first iov.
@@ -4070,7 +4078,6 @@ receive_encrypted_standard(struct TCP_Server_Info *server,
4070{ 4078{
4071 int ret, length; 4079 int ret, length;
4072 char *buf = server->smallbuf; 4080 char *buf = server->smallbuf;
4073 char *tmpbuf;
4074 struct smb2_sync_hdr *shdr; 4081 struct smb2_sync_hdr *shdr;
4075 unsigned int pdu_length = server->pdu_size; 4082 unsigned int pdu_length = server->pdu_size;
4076 unsigned int buf_size; 4083 unsigned int buf_size;
@@ -4100,18 +4107,15 @@ receive_encrypted_standard(struct TCP_Server_Info *server,
4100 return length; 4107 return length;
4101 4108
4102 next_is_large = server->large_buf; 4109 next_is_large = server->large_buf;
4103 one_more: 4110one_more:
4104 shdr = (struct smb2_sync_hdr *)buf; 4111 shdr = (struct smb2_sync_hdr *)buf;
4105 if (shdr->NextCommand) { 4112 if (shdr->NextCommand) {
4106 if (next_is_large) { 4113 if (next_is_large)
4107 tmpbuf = server->bigbuf;
4108 next_buffer = (char *)cifs_buf_get(); 4114 next_buffer = (char *)cifs_buf_get();
4109 } else { 4115 else
4110 tmpbuf = server->smallbuf;
4111 next_buffer = (char *)cifs_small_buf_get(); 4116 next_buffer = (char *)cifs_small_buf_get();
4112 }
4113 memcpy(next_buffer, 4117 memcpy(next_buffer,
4114 tmpbuf + le32_to_cpu(shdr->NextCommand), 4118 buf + le32_to_cpu(shdr->NextCommand),
4115 pdu_length - le32_to_cpu(shdr->NextCommand)); 4119 pdu_length - le32_to_cpu(shdr->NextCommand));
4116 } 4120 }
4117 4121
@@ -4140,12 +4144,21 @@ receive_encrypted_standard(struct TCP_Server_Info *server,
4140 pdu_length -= le32_to_cpu(shdr->NextCommand); 4144 pdu_length -= le32_to_cpu(shdr->NextCommand);
4141 server->large_buf = next_is_large; 4145 server->large_buf = next_is_large;
4142 if (next_is_large) 4146 if (next_is_large)
4143 server->bigbuf = next_buffer; 4147 server->bigbuf = buf = next_buffer;
4144 else 4148 else
4145 server->smallbuf = next_buffer; 4149 server->smallbuf = buf = next_buffer;
4146
4147 buf += le32_to_cpu(shdr->NextCommand);
4148 goto one_more; 4150 goto one_more;
4151 } else if (ret != 0) {
4152 /*
4153 * ret != 0 here means that we didn't get to handle_mid() thus
4154 * server->smallbuf and server->bigbuf are still valid. We need
4155 * to free next_buffer because it is not going to be used
4156 * anywhere.
4157 */
4158 if (next_is_large)
4159 free_rsp_buf(CIFS_LARGE_BUFFER, next_buffer);
4160 else
4161 free_rsp_buf(CIFS_SMALL_BUFFER, next_buffer);
4149 } 4162 }
4150 4163
4151 return ret; 4164 return ret;
diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c
index c8cd7b6cdda2..31e4a1b0b170 100644
--- a/fs/cifs/smb2pdu.c
+++ b/fs/cifs/smb2pdu.c
@@ -252,7 +252,7 @@ smb2_reconnect(__le16 smb2_command, struct cifs_tcon *tcon)
252 if (tcon == NULL) 252 if (tcon == NULL)
253 return 0; 253 return 0;
254 254
255 if (smb2_command == SMB2_TREE_CONNECT) 255 if (smb2_command == SMB2_TREE_CONNECT || smb2_command == SMB2_IOCTL)
256 return 0; 256 return 0;
257 257
258 if (tcon->tidStatus == CifsExiting) { 258 if (tcon->tidStatus == CifsExiting) {
@@ -1196,7 +1196,12 @@ SMB2_sess_alloc_buffer(struct SMB2_sess_data *sess_data)
1196 else 1196 else
1197 req->SecurityMode = 0; 1197 req->SecurityMode = 0;
1198 1198
1199#ifdef CONFIG_CIFS_DFS_UPCALL
1200 req->Capabilities = cpu_to_le32(SMB2_GLOBAL_CAP_DFS);
1201#else
1199 req->Capabilities = 0; 1202 req->Capabilities = 0;
1203#endif /* DFS_UPCALL */
1204
1200 req->Channel = 0; /* MBZ */ 1205 req->Channel = 0; /* MBZ */
1201 1206
1202 sess_data->iov[0].iov_base = (char *)req; 1207 sess_data->iov[0].iov_base = (char *)req;
diff --git a/fs/compat_ioctl.c b/fs/compat_ioctl.c
index 6e30949d9f77..a7ec2d3dff92 100644
--- a/fs/compat_ioctl.c
+++ b/fs/compat_ioctl.c
@@ -638,9 +638,6 @@ COMPATIBLE_IOCTL(PPPIOCDISCONN)
638COMPATIBLE_IOCTL(PPPIOCATTCHAN) 638COMPATIBLE_IOCTL(PPPIOCATTCHAN)
639COMPATIBLE_IOCTL(PPPIOCGCHAN) 639COMPATIBLE_IOCTL(PPPIOCGCHAN)
640COMPATIBLE_IOCTL(PPPIOCGL2TPSTATS) 640COMPATIBLE_IOCTL(PPPIOCGL2TPSTATS)
641/* PPPOX */
642COMPATIBLE_IOCTL(PPPOEIOCSFWD)
643COMPATIBLE_IOCTL(PPPOEIOCDFWD)
644/* Big A */ 641/* Big A */
645/* sparc only */ 642/* sparc only */
646/* Big Q for sound/OSS */ 643/* Big Q for sound/OSS */
diff --git a/fs/coredump.c b/fs/coredump.c
index e42e17e55bfd..b1ea7dfbd149 100644
--- a/fs/coredump.c
+++ b/fs/coredump.c
@@ -7,6 +7,7 @@
7#include <linux/stat.h> 7#include <linux/stat.h>
8#include <linux/fcntl.h> 8#include <linux/fcntl.h>
9#include <linux/swap.h> 9#include <linux/swap.h>
10#include <linux/ctype.h>
10#include <linux/string.h> 11#include <linux/string.h>
11#include <linux/init.h> 12#include <linux/init.h>
12#include <linux/pagemap.h> 13#include <linux/pagemap.h>
@@ -187,11 +188,13 @@ put_exe_file:
187 * name into corename, which must have space for at least 188 * name into corename, which must have space for at least
188 * CORENAME_MAX_SIZE bytes plus one byte for the zero terminator. 189 * CORENAME_MAX_SIZE bytes plus one byte for the zero terminator.
189 */ 190 */
190static int format_corename(struct core_name *cn, struct coredump_params *cprm) 191static int format_corename(struct core_name *cn, struct coredump_params *cprm,
192 size_t **argv, int *argc)
191{ 193{
192 const struct cred *cred = current_cred(); 194 const struct cred *cred = current_cred();
193 const char *pat_ptr = core_pattern; 195 const char *pat_ptr = core_pattern;
194 int ispipe = (*pat_ptr == '|'); 196 int ispipe = (*pat_ptr == '|');
197 bool was_space = false;
195 int pid_in_pattern = 0; 198 int pid_in_pattern = 0;
196 int err = 0; 199 int err = 0;
197 200
@@ -201,12 +204,35 @@ static int format_corename(struct core_name *cn, struct coredump_params *cprm)
201 return -ENOMEM; 204 return -ENOMEM;
202 cn->corename[0] = '\0'; 205 cn->corename[0] = '\0';
203 206
204 if (ispipe) 207 if (ispipe) {
208 int argvs = sizeof(core_pattern) / 2;
209 (*argv) = kmalloc_array(argvs, sizeof(**argv), GFP_KERNEL);
210 if (!(*argv))
211 return -ENOMEM;
212 (*argv)[(*argc)++] = 0;
205 ++pat_ptr; 213 ++pat_ptr;
214 }
206 215
207 /* Repeat as long as we have more pattern to process and more output 216 /* Repeat as long as we have more pattern to process and more output
208 space */ 217 space */
209 while (*pat_ptr) { 218 while (*pat_ptr) {
219 /*
220 * Split on spaces before doing template expansion so that
221 * %e and %E don't get split if they have spaces in them
222 */
223 if (ispipe) {
224 if (isspace(*pat_ptr)) {
225 was_space = true;
226 pat_ptr++;
227 continue;
228 } else if (was_space) {
229 was_space = false;
230 err = cn_printf(cn, "%c", '\0');
231 if (err)
232 return err;
233 (*argv)[(*argc)++] = cn->used;
234 }
235 }
210 if (*pat_ptr != '%') { 236 if (*pat_ptr != '%') {
211 err = cn_printf(cn, "%c", *pat_ptr++); 237 err = cn_printf(cn, "%c", *pat_ptr++);
212 } else { 238 } else {
@@ -546,6 +572,8 @@ void do_coredump(const kernel_siginfo_t *siginfo)
546 struct cred *cred; 572 struct cred *cred;
547 int retval = 0; 573 int retval = 0;
548 int ispipe; 574 int ispipe;
575 size_t *argv = NULL;
576 int argc = 0;
549 struct files_struct *displaced; 577 struct files_struct *displaced;
550 /* require nonrelative corefile path and be extra careful */ 578 /* require nonrelative corefile path and be extra careful */
551 bool need_suid_safe = false; 579 bool need_suid_safe = false;
@@ -592,9 +620,10 @@ void do_coredump(const kernel_siginfo_t *siginfo)
592 620
593 old_cred = override_creds(cred); 621 old_cred = override_creds(cred);
594 622
595 ispipe = format_corename(&cn, &cprm); 623 ispipe = format_corename(&cn, &cprm, &argv, &argc);
596 624
597 if (ispipe) { 625 if (ispipe) {
626 int argi;
598 int dump_count; 627 int dump_count;
599 char **helper_argv; 628 char **helper_argv;
600 struct subprocess_info *sub_info; 629 struct subprocess_info *sub_info;
@@ -637,12 +666,16 @@ void do_coredump(const kernel_siginfo_t *siginfo)
637 goto fail_dropcount; 666 goto fail_dropcount;
638 } 667 }
639 668
640 helper_argv = argv_split(GFP_KERNEL, cn.corename, NULL); 669 helper_argv = kmalloc_array(argc + 1, sizeof(*helper_argv),
670 GFP_KERNEL);
641 if (!helper_argv) { 671 if (!helper_argv) {
642 printk(KERN_WARNING "%s failed to allocate memory\n", 672 printk(KERN_WARNING "%s failed to allocate memory\n",
643 __func__); 673 __func__);
644 goto fail_dropcount; 674 goto fail_dropcount;
645 } 675 }
676 for (argi = 0; argi < argc; argi++)
677 helper_argv[argi] = cn.corename + argv[argi];
678 helper_argv[argi] = NULL;
646 679
647 retval = -ENOMEM; 680 retval = -ENOMEM;
648 sub_info = call_usermodehelper_setup(helper_argv[0], 681 sub_info = call_usermodehelper_setup(helper_argv[0],
@@ -652,7 +685,7 @@ void do_coredump(const kernel_siginfo_t *siginfo)
652 retval = call_usermodehelper_exec(sub_info, 685 retval = call_usermodehelper_exec(sub_info,
653 UMH_WAIT_EXEC); 686 UMH_WAIT_EXEC);
654 687
655 argv_free(helper_argv); 688 kfree(helper_argv);
656 if (retval) { 689 if (retval) {
657 printk(KERN_INFO "Core dump to |%s pipe failed\n", 690 printk(KERN_INFO "Core dump to |%s pipe failed\n",
658 cn.corename); 691 cn.corename);
@@ -766,6 +799,7 @@ fail_dropcount:
766 if (ispipe) 799 if (ispipe)
767 atomic_dec(&core_dump_count); 800 atomic_dec(&core_dump_count);
768fail_unlock: 801fail_unlock:
802 kfree(argv);
769 kfree(cn.corename); 803 kfree(cn.corename);
770 coredump_finish(mm, core_dumped); 804 coredump_finish(mm, core_dumped);
771 revert_creds(old_cred); 805 revert_creds(old_cred);
diff --git a/fs/dax.c b/fs/dax.c
index a237141d8787..6bf81f931de3 100644
--- a/fs/dax.c
+++ b/fs/dax.c
@@ -266,7 +266,7 @@ static void wait_entry_unlocked(struct xa_state *xas, void *entry)
266static void put_unlocked_entry(struct xa_state *xas, void *entry) 266static void put_unlocked_entry(struct xa_state *xas, void *entry)
267{ 267{
268 /* If we were the only waiter woken, wake the next one */ 268 /* If we were the only waiter woken, wake the next one */
269 if (entry && dax_is_conflict(entry)) 269 if (entry && !dax_is_conflict(entry))
270 dax_wake_entry(xas, entry, false); 270 dax_wake_entry(xas, entry, false);
271} 271}
272 272
@@ -600,7 +600,7 @@ struct page *dax_layout_busy_page(struct address_space *mapping)
600 * guaranteed to either see new references or prevent new 600 * guaranteed to either see new references or prevent new
601 * references from being established. 601 * references from being established.
602 */ 602 */
603 unmap_mapping_range(mapping, 0, 0, 1); 603 unmap_mapping_range(mapping, 0, 0, 0);
604 604
605 xas_lock_irq(&xas); 605 xas_lock_irq(&xas);
606 xas_for_each(&xas, entry, ULONG_MAX) { 606 xas_for_each(&xas, entry, ULONG_MAX) {
diff --git a/fs/exec.c b/fs/exec.c
index c71cbfe6826a..f7f6a140856a 100644
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -1828,7 +1828,7 @@ static int __do_execve_file(int fd, struct filename *filename,
1828 membarrier_execve(current); 1828 membarrier_execve(current);
1829 rseq_execve(current); 1829 rseq_execve(current);
1830 acct_update_integrals(current); 1830 acct_update_integrals(current);
1831 task_numa_free(current); 1831 task_numa_free(current, false);
1832 free_bprm(bprm); 1832 free_bprm(bprm);
1833 kfree(pathbuf); 1833 kfree(pathbuf);
1834 if (filename) 1834 if (filename)
diff --git a/fs/f2fs/file.c b/fs/f2fs/file.c
index f8d46df8fa9e..3e58a6f697dd 100644
--- a/fs/f2fs/file.c
+++ b/fs/f2fs/file.c
@@ -1653,19 +1653,12 @@ static int f2fs_file_flush(struct file *file, fl_owner_t id)
1653static int f2fs_setflags_common(struct inode *inode, u32 iflags, u32 mask) 1653static int f2fs_setflags_common(struct inode *inode, u32 iflags, u32 mask)
1654{ 1654{
1655 struct f2fs_inode_info *fi = F2FS_I(inode); 1655 struct f2fs_inode_info *fi = F2FS_I(inode);
1656 u32 oldflags;
1657 1656
1658 /* Is it quota file? Do not allow user to mess with it */ 1657 /* Is it quota file? Do not allow user to mess with it */
1659 if (IS_NOQUOTA(inode)) 1658 if (IS_NOQUOTA(inode))
1660 return -EPERM; 1659 return -EPERM;
1661 1660
1662 oldflags = fi->i_flags; 1661 fi->i_flags = iflags | (fi->i_flags & ~mask);
1663
1664 if ((iflags ^ oldflags) & (F2FS_APPEND_FL | F2FS_IMMUTABLE_FL))
1665 if (!capable(CAP_LINUX_IMMUTABLE))
1666 return -EPERM;
1667
1668 fi->i_flags = iflags | (oldflags & ~mask);
1669 1662
1670 if (fi->i_flags & F2FS_PROJINHERIT_FL) 1663 if (fi->i_flags & F2FS_PROJINHERIT_FL)
1671 set_inode_flag(inode, FI_PROJ_INHERIT); 1664 set_inode_flag(inode, FI_PROJ_INHERIT);
@@ -1770,7 +1763,8 @@ static int f2fs_ioc_getflags(struct file *filp, unsigned long arg)
1770static int f2fs_ioc_setflags(struct file *filp, unsigned long arg) 1763static int f2fs_ioc_setflags(struct file *filp, unsigned long arg)
1771{ 1764{
1772 struct inode *inode = file_inode(filp); 1765 struct inode *inode = file_inode(filp);
1773 u32 fsflags; 1766 struct f2fs_inode_info *fi = F2FS_I(inode);
1767 u32 fsflags, old_fsflags;
1774 u32 iflags; 1768 u32 iflags;
1775 int ret; 1769 int ret;
1776 1770
@@ -1794,8 +1788,14 @@ static int f2fs_ioc_setflags(struct file *filp, unsigned long arg)
1794 1788
1795 inode_lock(inode); 1789 inode_lock(inode);
1796 1790
1791 old_fsflags = f2fs_iflags_to_fsflags(fi->i_flags);
1792 ret = vfs_ioc_setflags_prepare(inode, old_fsflags, fsflags);
1793 if (ret)
1794 goto out;
1795
1797 ret = f2fs_setflags_common(inode, iflags, 1796 ret = f2fs_setflags_common(inode, iflags,
1798 f2fs_fsflags_to_iflags(F2FS_SETTABLE_FS_FL)); 1797 f2fs_fsflags_to_iflags(F2FS_SETTABLE_FS_FL));
1798out:
1799 inode_unlock(inode); 1799 inode_unlock(inode);
1800 mnt_drop_write_file(filp); 1800 mnt_drop_write_file(filp);
1801 return ret; 1801 return ret;
@@ -2855,52 +2855,32 @@ static inline u32 f2fs_xflags_to_iflags(u32 xflags)
2855 return iflags; 2855 return iflags;
2856} 2856}
2857 2857
2858static int f2fs_ioc_fsgetxattr(struct file *filp, unsigned long arg) 2858static void f2fs_fill_fsxattr(struct inode *inode, struct fsxattr *fa)
2859{ 2859{
2860 struct inode *inode = file_inode(filp);
2861 struct f2fs_inode_info *fi = F2FS_I(inode); 2860 struct f2fs_inode_info *fi = F2FS_I(inode);
2862 struct fsxattr fa;
2863 2861
2864 memset(&fa, 0, sizeof(struct fsxattr)); 2862 simple_fill_fsxattr(fa, f2fs_iflags_to_xflags(fi->i_flags));
2865 fa.fsx_xflags = f2fs_iflags_to_xflags(fi->i_flags);
2866 2863
2867 if (f2fs_sb_has_project_quota(F2FS_I_SB(inode))) 2864 if (f2fs_sb_has_project_quota(F2FS_I_SB(inode)))
2868 fa.fsx_projid = (__u32)from_kprojid(&init_user_ns, 2865 fa->fsx_projid = from_kprojid(&init_user_ns, fi->i_projid);
2869 fi->i_projid);
2870
2871 if (copy_to_user((struct fsxattr __user *)arg, &fa, sizeof(fa)))
2872 return -EFAULT;
2873 return 0;
2874} 2866}
2875 2867
2876static int f2fs_ioctl_check_project(struct inode *inode, struct fsxattr *fa) 2868static int f2fs_ioc_fsgetxattr(struct file *filp, unsigned long arg)
2877{ 2869{
2878 /* 2870 struct inode *inode = file_inode(filp);
2879 * Project Quota ID state is only allowed to change from within the init 2871 struct fsxattr fa;
2880 * namespace. Enforce that restriction only if we are trying to change
2881 * the quota ID state. Everything else is allowed in user namespaces.
2882 */
2883 if (current_user_ns() == &init_user_ns)
2884 return 0;
2885 2872
2886 if (__kprojid_val(F2FS_I(inode)->i_projid) != fa->fsx_projid) 2873 f2fs_fill_fsxattr(inode, &fa);
2887 return -EINVAL;
2888
2889 if (F2FS_I(inode)->i_flags & F2FS_PROJINHERIT_FL) {
2890 if (!(fa->fsx_xflags & FS_XFLAG_PROJINHERIT))
2891 return -EINVAL;
2892 } else {
2893 if (fa->fsx_xflags & FS_XFLAG_PROJINHERIT)
2894 return -EINVAL;
2895 }
2896 2874
2875 if (copy_to_user((struct fsxattr __user *)arg, &fa, sizeof(fa)))
2876 return -EFAULT;
2897 return 0; 2877 return 0;
2898} 2878}
2899 2879
2900static int f2fs_ioc_fssetxattr(struct file *filp, unsigned long arg) 2880static int f2fs_ioc_fssetxattr(struct file *filp, unsigned long arg)
2901{ 2881{
2902 struct inode *inode = file_inode(filp); 2882 struct inode *inode = file_inode(filp);
2903 struct fsxattr fa; 2883 struct fsxattr fa, old_fa;
2904 u32 iflags; 2884 u32 iflags;
2905 int err; 2885 int err;
2906 2886
@@ -2923,9 +2903,12 @@ static int f2fs_ioc_fssetxattr(struct file *filp, unsigned long arg)
2923 return err; 2903 return err;
2924 2904
2925 inode_lock(inode); 2905 inode_lock(inode);
2926 err = f2fs_ioctl_check_project(inode, &fa); 2906
2907 f2fs_fill_fsxattr(inode, &old_fa);
2908 err = vfs_ioc_fssetxattr_check(inode, &old_fa, &fa);
2927 if (err) 2909 if (err)
2928 goto out; 2910 goto out;
2911
2929 err = f2fs_setflags_common(inode, iflags, 2912 err = f2fs_setflags_common(inode, iflags,
2930 f2fs_xflags_to_iflags(F2FS_SUPPORTED_XFLAGS)); 2913 f2fs_xflags_to_iflags(F2FS_SUPPORTED_XFLAGS));
2931 if (err) 2914 if (err)
diff --git a/fs/f2fs/gc.c b/fs/f2fs/gc.c
index 6691f526fa40..8974672db78f 100644
--- a/fs/f2fs/gc.c
+++ b/fs/f2fs/gc.c
@@ -796,6 +796,29 @@ static int move_data_block(struct inode *inode, block_t bidx,
796 if (lfs_mode) 796 if (lfs_mode)
797 down_write(&fio.sbi->io_order_lock); 797 down_write(&fio.sbi->io_order_lock);
798 798
799 mpage = f2fs_grab_cache_page(META_MAPPING(fio.sbi),
800 fio.old_blkaddr, false);
801 if (!mpage)
802 goto up_out;
803
804 fio.encrypted_page = mpage;
805
806 /* read source block in mpage */
807 if (!PageUptodate(mpage)) {
808 err = f2fs_submit_page_bio(&fio);
809 if (err) {
810 f2fs_put_page(mpage, 1);
811 goto up_out;
812 }
813 lock_page(mpage);
814 if (unlikely(mpage->mapping != META_MAPPING(fio.sbi) ||
815 !PageUptodate(mpage))) {
816 err = -EIO;
817 f2fs_put_page(mpage, 1);
818 goto up_out;
819 }
820 }
821
799 f2fs_allocate_data_block(fio.sbi, NULL, fio.old_blkaddr, &newaddr, 822 f2fs_allocate_data_block(fio.sbi, NULL, fio.old_blkaddr, &newaddr,
800 &sum, CURSEG_COLD_DATA, NULL, false); 823 &sum, CURSEG_COLD_DATA, NULL, false);
801 824
@@ -803,44 +826,18 @@ static int move_data_block(struct inode *inode, block_t bidx,
803 newaddr, FGP_LOCK | FGP_CREAT, GFP_NOFS); 826 newaddr, FGP_LOCK | FGP_CREAT, GFP_NOFS);
804 if (!fio.encrypted_page) { 827 if (!fio.encrypted_page) {
805 err = -ENOMEM; 828 err = -ENOMEM;
806 goto recover_block;
807 }
808
809 mpage = f2fs_pagecache_get_page(META_MAPPING(fio.sbi),
810 fio.old_blkaddr, FGP_LOCK, GFP_NOFS);
811 if (mpage) {
812 bool updated = false;
813
814 if (PageUptodate(mpage)) {
815 memcpy(page_address(fio.encrypted_page),
816 page_address(mpage), PAGE_SIZE);
817 updated = true;
818 }
819 f2fs_put_page(mpage, 1); 829 f2fs_put_page(mpage, 1);
820 invalidate_mapping_pages(META_MAPPING(fio.sbi), 830 goto recover_block;
821 fio.old_blkaddr, fio.old_blkaddr);
822 if (updated)
823 goto write_page;
824 }
825
826 err = f2fs_submit_page_bio(&fio);
827 if (err)
828 goto put_page_out;
829
830 /* write page */
831 lock_page(fio.encrypted_page);
832
833 if (unlikely(fio.encrypted_page->mapping != META_MAPPING(fio.sbi))) {
834 err = -EIO;
835 goto put_page_out;
836 }
837 if (unlikely(!PageUptodate(fio.encrypted_page))) {
838 err = -EIO;
839 goto put_page_out;
840 } 831 }
841 832
842write_page: 833 /* write target block */
843 f2fs_wait_on_page_writeback(fio.encrypted_page, DATA, true, true); 834 f2fs_wait_on_page_writeback(fio.encrypted_page, DATA, true, true);
835 memcpy(page_address(fio.encrypted_page),
836 page_address(mpage), PAGE_SIZE);
837 f2fs_put_page(mpage, 1);
838 invalidate_mapping_pages(META_MAPPING(fio.sbi),
839 fio.old_blkaddr, fio.old_blkaddr);
840
844 set_page_dirty(fio.encrypted_page); 841 set_page_dirty(fio.encrypted_page);
845 if (clear_page_dirty_for_io(fio.encrypted_page)) 842 if (clear_page_dirty_for_io(fio.encrypted_page))
846 dec_page_count(fio.sbi, F2FS_DIRTY_META); 843 dec_page_count(fio.sbi, F2FS_DIRTY_META);
@@ -871,11 +868,12 @@ write_page:
871put_page_out: 868put_page_out:
872 f2fs_put_page(fio.encrypted_page, 1); 869 f2fs_put_page(fio.encrypted_page, 1);
873recover_block: 870recover_block:
874 if (lfs_mode)
875 up_write(&fio.sbi->io_order_lock);
876 if (err) 871 if (err)
877 f2fs_do_replace_block(fio.sbi, &sum, newaddr, fio.old_blkaddr, 872 f2fs_do_replace_block(fio.sbi, &sum, newaddr, fio.old_blkaddr,
878 true, true); 873 true, true);
874up_out:
875 if (lfs_mode)
876 up_write(&fio.sbi->io_order_lock);
879put_out: 877put_out:
880 f2fs_put_dnode(&dn); 878 f2fs_put_dnode(&dn);
881out: 879out:
diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c
index 6de6cda44031..78a1b873e48a 100644
--- a/fs/f2fs/super.c
+++ b/fs/f2fs/super.c
@@ -2422,6 +2422,12 @@ static int sanity_check_raw_super(struct f2fs_sb_info *sbi,
2422 size_t crc_offset = 0; 2422 size_t crc_offset = 0;
2423 __u32 crc = 0; 2423 __u32 crc = 0;
2424 2424
2425 if (le32_to_cpu(raw_super->magic) != F2FS_SUPER_MAGIC) {
2426 f2fs_info(sbi, "Magic Mismatch, valid(0x%x) - read(0x%x)",
2427 F2FS_SUPER_MAGIC, le32_to_cpu(raw_super->magic));
2428 return -EINVAL;
2429 }
2430
2425 /* Check checksum_offset and crc in superblock */ 2431 /* Check checksum_offset and crc in superblock */
2426 if (__F2FS_HAS_FEATURE(raw_super, F2FS_FEATURE_SB_CHKSUM)) { 2432 if (__F2FS_HAS_FEATURE(raw_super, F2FS_FEATURE_SB_CHKSUM)) {
2427 crc_offset = le32_to_cpu(raw_super->checksum_offset); 2433 crc_offset = le32_to_cpu(raw_super->checksum_offset);
@@ -2429,26 +2435,20 @@ static int sanity_check_raw_super(struct f2fs_sb_info *sbi,
2429 offsetof(struct f2fs_super_block, crc)) { 2435 offsetof(struct f2fs_super_block, crc)) {
2430 f2fs_info(sbi, "Invalid SB checksum offset: %zu", 2436 f2fs_info(sbi, "Invalid SB checksum offset: %zu",
2431 crc_offset); 2437 crc_offset);
2432 return 1; 2438 return -EFSCORRUPTED;
2433 } 2439 }
2434 crc = le32_to_cpu(raw_super->crc); 2440 crc = le32_to_cpu(raw_super->crc);
2435 if (!f2fs_crc_valid(sbi, crc, raw_super, crc_offset)) { 2441 if (!f2fs_crc_valid(sbi, crc, raw_super, crc_offset)) {
2436 f2fs_info(sbi, "Invalid SB checksum value: %u", crc); 2442 f2fs_info(sbi, "Invalid SB checksum value: %u", crc);
2437 return 1; 2443 return -EFSCORRUPTED;
2438 } 2444 }
2439 } 2445 }
2440 2446
2441 if (F2FS_SUPER_MAGIC != le32_to_cpu(raw_super->magic)) {
2442 f2fs_info(sbi, "Magic Mismatch, valid(0x%x) - read(0x%x)",
2443 F2FS_SUPER_MAGIC, le32_to_cpu(raw_super->magic));
2444 return 1;
2445 }
2446
2447 /* Currently, support only 4KB page cache size */ 2447 /* Currently, support only 4KB page cache size */
2448 if (F2FS_BLKSIZE != PAGE_SIZE) { 2448 if (F2FS_BLKSIZE != PAGE_SIZE) {
2449 f2fs_info(sbi, "Invalid page_cache_size (%lu), supports only 4KB", 2449 f2fs_info(sbi, "Invalid page_cache_size (%lu), supports only 4KB",
2450 PAGE_SIZE); 2450 PAGE_SIZE);
2451 return 1; 2451 return -EFSCORRUPTED;
2452 } 2452 }
2453 2453
2454 /* Currently, support only 4KB block size */ 2454 /* Currently, support only 4KB block size */
@@ -2456,14 +2456,14 @@ static int sanity_check_raw_super(struct f2fs_sb_info *sbi,
2456 if (blocksize != F2FS_BLKSIZE) { 2456 if (blocksize != F2FS_BLKSIZE) {
2457 f2fs_info(sbi, "Invalid blocksize (%u), supports only 4KB", 2457 f2fs_info(sbi, "Invalid blocksize (%u), supports only 4KB",
2458 blocksize); 2458 blocksize);
2459 return 1; 2459 return -EFSCORRUPTED;
2460 } 2460 }
2461 2461
2462 /* check log blocks per segment */ 2462 /* check log blocks per segment */
2463 if (le32_to_cpu(raw_super->log_blocks_per_seg) != 9) { 2463 if (le32_to_cpu(raw_super->log_blocks_per_seg) != 9) {
2464 f2fs_info(sbi, "Invalid log blocks per segment (%u)", 2464 f2fs_info(sbi, "Invalid log blocks per segment (%u)",
2465 le32_to_cpu(raw_super->log_blocks_per_seg)); 2465 le32_to_cpu(raw_super->log_blocks_per_seg));
2466 return 1; 2466 return -EFSCORRUPTED;
2467 } 2467 }
2468 2468
2469 /* Currently, support 512/1024/2048/4096 bytes sector size */ 2469 /* Currently, support 512/1024/2048/4096 bytes sector size */
@@ -2473,7 +2473,7 @@ static int sanity_check_raw_super(struct f2fs_sb_info *sbi,
2473 F2FS_MIN_LOG_SECTOR_SIZE) { 2473 F2FS_MIN_LOG_SECTOR_SIZE) {
2474 f2fs_info(sbi, "Invalid log sectorsize (%u)", 2474 f2fs_info(sbi, "Invalid log sectorsize (%u)",
2475 le32_to_cpu(raw_super->log_sectorsize)); 2475 le32_to_cpu(raw_super->log_sectorsize));
2476 return 1; 2476 return -EFSCORRUPTED;
2477 } 2477 }
2478 if (le32_to_cpu(raw_super->log_sectors_per_block) + 2478 if (le32_to_cpu(raw_super->log_sectors_per_block) +
2479 le32_to_cpu(raw_super->log_sectorsize) != 2479 le32_to_cpu(raw_super->log_sectorsize) !=
@@ -2481,7 +2481,7 @@ static int sanity_check_raw_super(struct f2fs_sb_info *sbi,
2481 f2fs_info(sbi, "Invalid log sectors per block(%u) log sectorsize(%u)", 2481 f2fs_info(sbi, "Invalid log sectors per block(%u) log sectorsize(%u)",
2482 le32_to_cpu(raw_super->log_sectors_per_block), 2482 le32_to_cpu(raw_super->log_sectors_per_block),
2483 le32_to_cpu(raw_super->log_sectorsize)); 2483 le32_to_cpu(raw_super->log_sectorsize));
2484 return 1; 2484 return -EFSCORRUPTED;
2485 } 2485 }
2486 2486
2487 segment_count = le32_to_cpu(raw_super->segment_count); 2487 segment_count = le32_to_cpu(raw_super->segment_count);
@@ -2495,7 +2495,7 @@ static int sanity_check_raw_super(struct f2fs_sb_info *sbi,
2495 if (segment_count > F2FS_MAX_SEGMENT || 2495 if (segment_count > F2FS_MAX_SEGMENT ||
2496 segment_count < F2FS_MIN_SEGMENTS) { 2496 segment_count < F2FS_MIN_SEGMENTS) {
2497 f2fs_info(sbi, "Invalid segment count (%u)", segment_count); 2497 f2fs_info(sbi, "Invalid segment count (%u)", segment_count);
2498 return 1; 2498 return -EFSCORRUPTED;
2499 } 2499 }
2500 2500
2501 if (total_sections > segment_count || 2501 if (total_sections > segment_count ||
@@ -2503,25 +2503,25 @@ static int sanity_check_raw_super(struct f2fs_sb_info *sbi,
2503 segs_per_sec > segment_count || !segs_per_sec) { 2503 segs_per_sec > segment_count || !segs_per_sec) {
2504 f2fs_info(sbi, "Invalid segment/section count (%u, %u x %u)", 2504 f2fs_info(sbi, "Invalid segment/section count (%u, %u x %u)",
2505 segment_count, total_sections, segs_per_sec); 2505 segment_count, total_sections, segs_per_sec);
2506 return 1; 2506 return -EFSCORRUPTED;
2507 } 2507 }
2508 2508
2509 if ((segment_count / segs_per_sec) < total_sections) { 2509 if ((segment_count / segs_per_sec) < total_sections) {
2510 f2fs_info(sbi, "Small segment_count (%u < %u * %u)", 2510 f2fs_info(sbi, "Small segment_count (%u < %u * %u)",
2511 segment_count, segs_per_sec, total_sections); 2511 segment_count, segs_per_sec, total_sections);
2512 return 1; 2512 return -EFSCORRUPTED;
2513 } 2513 }
2514 2514
2515 if (segment_count > (le64_to_cpu(raw_super->block_count) >> 9)) { 2515 if (segment_count > (le64_to_cpu(raw_super->block_count) >> 9)) {
2516 f2fs_info(sbi, "Wrong segment_count / block_count (%u > %llu)", 2516 f2fs_info(sbi, "Wrong segment_count / block_count (%u > %llu)",
2517 segment_count, le64_to_cpu(raw_super->block_count)); 2517 segment_count, le64_to_cpu(raw_super->block_count));
2518 return 1; 2518 return -EFSCORRUPTED;
2519 } 2519 }
2520 2520
2521 if (secs_per_zone > total_sections || !secs_per_zone) { 2521 if (secs_per_zone > total_sections || !secs_per_zone) {
2522 f2fs_info(sbi, "Wrong secs_per_zone / total_sections (%u, %u)", 2522 f2fs_info(sbi, "Wrong secs_per_zone / total_sections (%u, %u)",
2523 secs_per_zone, total_sections); 2523 secs_per_zone, total_sections);
2524 return 1; 2524 return -EFSCORRUPTED;
2525 } 2525 }
2526 if (le32_to_cpu(raw_super->extension_count) > F2FS_MAX_EXTENSION || 2526 if (le32_to_cpu(raw_super->extension_count) > F2FS_MAX_EXTENSION ||
2527 raw_super->hot_ext_count > F2FS_MAX_EXTENSION || 2527 raw_super->hot_ext_count > F2FS_MAX_EXTENSION ||
@@ -2531,7 +2531,7 @@ static int sanity_check_raw_super(struct f2fs_sb_info *sbi,
2531 le32_to_cpu(raw_super->extension_count), 2531 le32_to_cpu(raw_super->extension_count),
2532 raw_super->hot_ext_count, 2532 raw_super->hot_ext_count,
2533 F2FS_MAX_EXTENSION); 2533 F2FS_MAX_EXTENSION);
2534 return 1; 2534 return -EFSCORRUPTED;
2535 } 2535 }
2536 2536
2537 if (le32_to_cpu(raw_super->cp_payload) > 2537 if (le32_to_cpu(raw_super->cp_payload) >
@@ -2539,7 +2539,7 @@ static int sanity_check_raw_super(struct f2fs_sb_info *sbi,
2539 f2fs_info(sbi, "Insane cp_payload (%u > %u)", 2539 f2fs_info(sbi, "Insane cp_payload (%u > %u)",
2540 le32_to_cpu(raw_super->cp_payload), 2540 le32_to_cpu(raw_super->cp_payload),
2541 blocks_per_seg - F2FS_CP_PACKS); 2541 blocks_per_seg - F2FS_CP_PACKS);
2542 return 1; 2542 return -EFSCORRUPTED;
2543 } 2543 }
2544 2544
2545 /* check reserved ino info */ 2545 /* check reserved ino info */
@@ -2550,12 +2550,12 @@ static int sanity_check_raw_super(struct f2fs_sb_info *sbi,
2550 le32_to_cpu(raw_super->node_ino), 2550 le32_to_cpu(raw_super->node_ino),
2551 le32_to_cpu(raw_super->meta_ino), 2551 le32_to_cpu(raw_super->meta_ino),
2552 le32_to_cpu(raw_super->root_ino)); 2552 le32_to_cpu(raw_super->root_ino));
2553 return 1; 2553 return -EFSCORRUPTED;
2554 } 2554 }
2555 2555
2556 /* check CP/SIT/NAT/SSA/MAIN_AREA area boundary */ 2556 /* check CP/SIT/NAT/SSA/MAIN_AREA area boundary */
2557 if (sanity_check_area_boundary(sbi, bh)) 2557 if (sanity_check_area_boundary(sbi, bh))
2558 return 1; 2558 return -EFSCORRUPTED;
2559 2559
2560 return 0; 2560 return 0;
2561} 2561}
@@ -2870,10 +2870,10 @@ static int read_raw_super_block(struct f2fs_sb_info *sbi,
2870 } 2870 }
2871 2871
2872 /* sanity checking of raw super */ 2872 /* sanity checking of raw super */
2873 if (sanity_check_raw_super(sbi, bh)) { 2873 err = sanity_check_raw_super(sbi, bh);
2874 if (err) {
2874 f2fs_err(sbi, "Can't find valid F2FS filesystem in %dth superblock", 2875 f2fs_err(sbi, "Can't find valid F2FS filesystem in %dth superblock",
2875 block + 1); 2876 block + 1);
2876 err = -EFSCORRUPTED;
2877 brelse(bh); 2877 brelse(bh);
2878 continue; 2878 continue;
2879 } 2879 }
diff --git a/fs/gfs2/bmap.c b/fs/gfs2/bmap.c
index 79581b9bdebb..4f8b5fd6c81f 100644
--- a/fs/gfs2/bmap.c
+++ b/fs/gfs2/bmap.c
@@ -390,6 +390,19 @@ static int fillup_metapath(struct gfs2_inode *ip, struct metapath *mp, int h)
390 return mp->mp_aheight - x - 1; 390 return mp->mp_aheight - x - 1;
391} 391}
392 392
393static sector_t metapath_to_block(struct gfs2_sbd *sdp, struct metapath *mp)
394{
395 sector_t factor = 1, block = 0;
396 int hgt;
397
398 for (hgt = mp->mp_fheight - 1; hgt >= 0; hgt--) {
399 if (hgt < mp->mp_aheight)
400 block += mp->mp_list[hgt] * factor;
401 factor *= sdp->sd_inptrs;
402 }
403 return block;
404}
405
393static void release_metapath(struct metapath *mp) 406static void release_metapath(struct metapath *mp)
394{ 407{
395 int i; 408 int i;
@@ -430,60 +443,84 @@ static inline unsigned int gfs2_extent_length(struct buffer_head *bh, __be64 *pt
430 return ptr - first; 443 return ptr - first;
431} 444}
432 445
433typedef const __be64 *(*gfs2_metadata_walker)( 446enum walker_status { WALK_STOP, WALK_FOLLOW, WALK_CONTINUE };
434 struct metapath *mp, 447
435 const __be64 *start, const __be64 *end, 448/*
436 u64 factor, void *data); 449 * gfs2_metadata_walker - walk an indirect block
450 * @mp: Metapath to indirect block
451 * @ptrs: Number of pointers to look at
452 *
453 * When returning WALK_FOLLOW, the walker must update @mp to point at the right
454 * indirect block to follow.
455 */
456typedef enum walker_status (*gfs2_metadata_walker)(struct metapath *mp,
457 unsigned int ptrs);
437 458
438#define WALK_STOP ((__be64 *)0) 459/*
439#define WALK_NEXT ((__be64 *)1) 460 * gfs2_walk_metadata - walk a tree of indirect blocks
461 * @inode: The inode
462 * @mp: Starting point of walk
463 * @max_len: Maximum number of blocks to walk
464 * @walker: Called during the walk
465 *
466 * Returns 1 if the walk was stopped by @walker, 0 if we went past @max_len or
467 * past the end of metadata, and a negative error code otherwise.
468 */
440 469
441static int gfs2_walk_metadata(struct inode *inode, sector_t lblock, 470static int gfs2_walk_metadata(struct inode *inode, struct metapath *mp,
442 u64 len, struct metapath *mp, gfs2_metadata_walker walker, 471 u64 max_len, gfs2_metadata_walker walker)
443 void *data)
444{ 472{
445 struct metapath clone;
446 struct gfs2_inode *ip = GFS2_I(inode); 473 struct gfs2_inode *ip = GFS2_I(inode);
447 struct gfs2_sbd *sdp = GFS2_SB(inode); 474 struct gfs2_sbd *sdp = GFS2_SB(inode);
448 const __be64 *start, *end, *ptr;
449 u64 factor = 1; 475 u64 factor = 1;
450 unsigned int hgt; 476 unsigned int hgt;
451 int ret = 0; 477 int ret;
452 478
453 for (hgt = ip->i_height - 1; hgt >= mp->mp_aheight; hgt--) 479 /*
480 * The walk starts in the lowest allocated indirect block, which may be
481 * before the position indicated by @mp. Adjust @max_len accordingly
482 * to avoid a short walk.
483 */
484 for (hgt = mp->mp_fheight - 1; hgt >= mp->mp_aheight; hgt--) {
485 max_len += mp->mp_list[hgt] * factor;
486 mp->mp_list[hgt] = 0;
454 factor *= sdp->sd_inptrs; 487 factor *= sdp->sd_inptrs;
488 }
455 489
456 for (;;) { 490 for (;;) {
457 u64 step; 491 u16 start = mp->mp_list[hgt];
492 enum walker_status status;
493 unsigned int ptrs;
494 u64 len;
458 495
459 /* Walk indirect block. */ 496 /* Walk indirect block. */
460 start = metapointer(hgt, mp); 497 ptrs = (hgt >= 1 ? sdp->sd_inptrs : sdp->sd_diptrs) - start;
461 end = metaend(hgt, mp); 498 len = ptrs * factor;
462 499 if (len > max_len)
463 step = (end - start) * factor; 500 ptrs = DIV_ROUND_UP_ULL(max_len, factor);
464 if (step > len) 501 status = walker(mp, ptrs);
465 end = start + DIV_ROUND_UP_ULL(len, factor); 502 switch (status) {
466 503 case WALK_STOP:
467 ptr = walker(mp, start, end, factor, data); 504 return 1;
468 if (ptr == WALK_STOP) 505 case WALK_FOLLOW:
506 BUG_ON(mp->mp_aheight == mp->mp_fheight);
507 ptrs = mp->mp_list[hgt] - start;
508 len = ptrs * factor;
469 break; 509 break;
470 if (step >= len) 510 case WALK_CONTINUE:
471 break; 511 break;
472 len -= step;
473 if (ptr != WALK_NEXT) {
474 BUG_ON(!*ptr);
475 mp->mp_list[hgt] += ptr - start;
476 goto fill_up_metapath;
477 } 512 }
513 if (len >= max_len)
514 break;
515 max_len -= len;
516 if (status == WALK_FOLLOW)
517 goto fill_up_metapath;
478 518
479lower_metapath: 519lower_metapath:
480 /* Decrease height of metapath. */ 520 /* Decrease height of metapath. */
481 if (mp != &clone) {
482 clone_metapath(&clone, mp);
483 mp = &clone;
484 }
485 brelse(mp->mp_bh[hgt]); 521 brelse(mp->mp_bh[hgt]);
486 mp->mp_bh[hgt] = NULL; 522 mp->mp_bh[hgt] = NULL;
523 mp->mp_list[hgt] = 0;
487 if (!hgt) 524 if (!hgt)
488 break; 525 break;
489 hgt--; 526 hgt--;
@@ -491,10 +528,7 @@ lower_metapath:
491 528
492 /* Advance in metadata tree. */ 529 /* Advance in metadata tree. */
493 (mp->mp_list[hgt])++; 530 (mp->mp_list[hgt])++;
494 start = metapointer(hgt, mp); 531 if (mp->mp_list[hgt] >= sdp->sd_inptrs) {
495 end = metaend(hgt, mp);
496 if (start >= end) {
497 mp->mp_list[hgt] = 0;
498 if (!hgt) 532 if (!hgt)
499 break; 533 break;
500 goto lower_metapath; 534 goto lower_metapath;
@@ -502,44 +536,36 @@ lower_metapath:
502 536
503fill_up_metapath: 537fill_up_metapath:
504 /* Increase height of metapath. */ 538 /* Increase height of metapath. */
505 if (mp != &clone) {
506 clone_metapath(&clone, mp);
507 mp = &clone;
508 }
509 ret = fillup_metapath(ip, mp, ip->i_height - 1); 539 ret = fillup_metapath(ip, mp, ip->i_height - 1);
510 if (ret < 0) 540 if (ret < 0)
511 break; 541 return ret;
512 hgt += ret; 542 hgt += ret;
513 for (; ret; ret--) 543 for (; ret; ret--)
514 do_div(factor, sdp->sd_inptrs); 544 do_div(factor, sdp->sd_inptrs);
515 mp->mp_aheight = hgt + 1; 545 mp->mp_aheight = hgt + 1;
516 } 546 }
517 if (mp == &clone) 547 return 0;
518 release_metapath(mp);
519 return ret;
520} 548}
521 549
522struct gfs2_hole_walker_args { 550static enum walker_status gfs2_hole_walker(struct metapath *mp,
523 u64 blocks; 551 unsigned int ptrs)
524};
525
526static const __be64 *gfs2_hole_walker(struct metapath *mp,
527 const __be64 *start, const __be64 *end,
528 u64 factor, void *data)
529{ 552{
530 struct gfs2_hole_walker_args *args = data; 553 const __be64 *start, *ptr, *end;
531 const __be64 *ptr; 554 unsigned int hgt;
555
556 hgt = mp->mp_aheight - 1;
557 start = metapointer(hgt, mp);
558 end = start + ptrs;
532 559
533 for (ptr = start; ptr < end; ptr++) { 560 for (ptr = start; ptr < end; ptr++) {
534 if (*ptr) { 561 if (*ptr) {
535 args->blocks += (ptr - start) * factor; 562 mp->mp_list[hgt] += ptr - start;
536 if (mp->mp_aheight == mp->mp_fheight) 563 if (mp->mp_aheight == mp->mp_fheight)
537 return WALK_STOP; 564 return WALK_STOP;
538 return ptr; /* increase height */ 565 return WALK_FOLLOW;
539 } 566 }
540 } 567 }
541 args->blocks += (end - start) * factor; 568 return WALK_CONTINUE;
542 return WALK_NEXT;
543} 569}
544 570
545/** 571/**
@@ -557,12 +583,24 @@ static const __be64 *gfs2_hole_walker(struct metapath *mp,
557static int gfs2_hole_size(struct inode *inode, sector_t lblock, u64 len, 583static int gfs2_hole_size(struct inode *inode, sector_t lblock, u64 len,
558 struct metapath *mp, struct iomap *iomap) 584 struct metapath *mp, struct iomap *iomap)
559{ 585{
560 struct gfs2_hole_walker_args args = { }; 586 struct metapath clone;
561 int ret = 0; 587 u64 hole_size;
588 int ret;
562 589
563 ret = gfs2_walk_metadata(inode, lblock, len, mp, gfs2_hole_walker, &args); 590 clone_metapath(&clone, mp);
564 if (!ret) 591 ret = gfs2_walk_metadata(inode, &clone, len, gfs2_hole_walker);
565 iomap->length = args.blocks << inode->i_blkbits; 592 if (ret < 0)
593 goto out;
594
595 if (ret == 1)
596 hole_size = metapath_to_block(GFS2_SB(inode), &clone) - lblock;
597 else
598 hole_size = len;
599 iomap->length = hole_size << inode->i_blkbits;
600 ret = 0;
601
602out:
603 release_metapath(&clone);
566 return ret; 604 return ret;
567} 605}
568 606
@@ -1002,11 +1040,16 @@ static void gfs2_iomap_page_done(struct inode *inode, loff_t pos,
1002 unsigned copied, struct page *page, 1040 unsigned copied, struct page *page,
1003 struct iomap *iomap) 1041 struct iomap *iomap)
1004{ 1042{
1043 struct gfs2_trans *tr = current->journal_info;
1005 struct gfs2_inode *ip = GFS2_I(inode); 1044 struct gfs2_inode *ip = GFS2_I(inode);
1006 struct gfs2_sbd *sdp = GFS2_SB(inode); 1045 struct gfs2_sbd *sdp = GFS2_SB(inode);
1007 1046
1008 if (page && !gfs2_is_stuffed(ip)) 1047 if (page && !gfs2_is_stuffed(ip))
1009 gfs2_page_add_databufs(ip, page, offset_in_page(pos), copied); 1048 gfs2_page_add_databufs(ip, page, offset_in_page(pos), copied);
1049
1050 if (tr->tr_num_buf_new)
1051 __mark_inode_dirty(inode, I_DIRTY_DATASYNC);
1052
1010 gfs2_trans_end(sdp); 1053 gfs2_trans_end(sdp);
1011} 1054}
1012 1055
@@ -1099,8 +1142,6 @@ static int gfs2_iomap_begin_write(struct inode *inode, loff_t pos,
1099 tr = current->journal_info; 1142 tr = current->journal_info;
1100 if (tr->tr_num_buf_new) 1143 if (tr->tr_num_buf_new)
1101 __mark_inode_dirty(inode, I_DIRTY_DATASYNC); 1144 __mark_inode_dirty(inode, I_DIRTY_DATASYNC);
1102 else
1103 gfs2_trans_add_meta(ip->i_gl, mp->mp_bh[0]);
1104 1145
1105 gfs2_trans_end(sdp); 1146 gfs2_trans_end(sdp);
1106 } 1147 }
@@ -1181,10 +1222,16 @@ static int gfs2_iomap_end(struct inode *inode, loff_t pos, loff_t length,
1181 1222
1182 if (ip->i_qadata && ip->i_qadata->qa_qd_num) 1223 if (ip->i_qadata && ip->i_qadata->qa_qd_num)
1183 gfs2_quota_unlock(ip); 1224 gfs2_quota_unlock(ip);
1225
1226 if (unlikely(!written))
1227 goto out_unlock;
1228
1184 if (iomap->flags & IOMAP_F_SIZE_CHANGED) 1229 if (iomap->flags & IOMAP_F_SIZE_CHANGED)
1185 mark_inode_dirty(inode); 1230 mark_inode_dirty(inode);
1186 gfs2_write_unlock(inode); 1231 set_bit(GLF_DIRTY, &ip->i_gl->gl_flags);
1187 1232
1233out_unlock:
1234 gfs2_write_unlock(inode);
1188out: 1235out:
1189 return 0; 1236 return 0;
1190} 1237}
diff --git a/fs/io_uring.c b/fs/io_uring.c
index e2a66e12fbc6..cfb48bd088e1 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -202,7 +202,7 @@ struct async_list {
202 202
203 struct file *file; 203 struct file *file;
204 off_t io_end; 204 off_t io_end;
205 size_t io_pages; 205 size_t io_len;
206}; 206};
207 207
208struct io_ring_ctx { 208struct io_ring_ctx {
@@ -333,7 +333,8 @@ struct io_kiocb {
333#define REQ_F_IO_DRAIN 16 /* drain existing IO first */ 333#define REQ_F_IO_DRAIN 16 /* drain existing IO first */
334#define REQ_F_IO_DRAINED 32 /* drain done */ 334#define REQ_F_IO_DRAINED 32 /* drain done */
335#define REQ_F_LINK 64 /* linked sqes */ 335#define REQ_F_LINK 64 /* linked sqes */
336#define REQ_F_FAIL_LINK 128 /* fail rest of links */ 336#define REQ_F_LINK_DONE 128 /* linked sqes done */
337#define REQ_F_FAIL_LINK 256 /* fail rest of links */
337 u64 user_data; 338 u64 user_data;
338 u32 result; 339 u32 result;
339 u32 sequence; 340 u32 sequence;
@@ -429,7 +430,7 @@ static inline bool io_sequence_defer(struct io_ring_ctx *ctx,
429 if ((req->flags & (REQ_F_IO_DRAIN|REQ_F_IO_DRAINED)) != REQ_F_IO_DRAIN) 430 if ((req->flags & (REQ_F_IO_DRAIN|REQ_F_IO_DRAINED)) != REQ_F_IO_DRAIN)
430 return false; 431 return false;
431 432
432 return req->sequence > ctx->cached_cq_tail + ctx->sq_ring->dropped; 433 return req->sequence != ctx->cached_cq_tail + ctx->sq_ring->dropped;
433} 434}
434 435
435static struct io_kiocb *io_get_deferred_req(struct io_ring_ctx *ctx) 436static struct io_kiocb *io_get_deferred_req(struct io_ring_ctx *ctx)
@@ -632,6 +633,7 @@ static void io_req_link_next(struct io_kiocb *req)
632 nxt->flags |= REQ_F_LINK; 633 nxt->flags |= REQ_F_LINK;
633 } 634 }
634 635
636 nxt->flags |= REQ_F_LINK_DONE;
635 INIT_WORK(&nxt->work, io_sq_wq_submit_work); 637 INIT_WORK(&nxt->work, io_sq_wq_submit_work);
636 queue_work(req->ctx->sqo_wq, &nxt->work); 638 queue_work(req->ctx->sqo_wq, &nxt->work);
637 } 639 }
@@ -677,6 +679,13 @@ static void io_put_req(struct io_kiocb *req)
677 io_free_req(req); 679 io_free_req(req);
678} 680}
679 681
682static unsigned io_cqring_events(struct io_cq_ring *ring)
683{
684 /* See comment at the top of this file */
685 smp_rmb();
686 return READ_ONCE(ring->r.tail) - READ_ONCE(ring->r.head);
687}
688
680/* 689/*
681 * Find and free completed poll iocbs 690 * Find and free completed poll iocbs
682 */ 691 */
@@ -769,7 +778,7 @@ static int io_do_iopoll(struct io_ring_ctx *ctx, unsigned int *nr_events,
769static int io_iopoll_getevents(struct io_ring_ctx *ctx, unsigned int *nr_events, 778static int io_iopoll_getevents(struct io_ring_ctx *ctx, unsigned int *nr_events,
770 long min) 779 long min)
771{ 780{
772 while (!list_empty(&ctx->poll_list)) { 781 while (!list_empty(&ctx->poll_list) && !need_resched()) {
773 int ret; 782 int ret;
774 783
775 ret = io_do_iopoll(ctx, nr_events, min); 784 ret = io_do_iopoll(ctx, nr_events, min);
@@ -796,6 +805,12 @@ static void io_iopoll_reap_events(struct io_ring_ctx *ctx)
796 unsigned int nr_events = 0; 805 unsigned int nr_events = 0;
797 806
798 io_iopoll_getevents(ctx, &nr_events, 1); 807 io_iopoll_getevents(ctx, &nr_events, 1);
808
809 /*
810 * Ensure we allow local-to-the-cpu processing to take place,
811 * in this case we need to ensure that we reap all events.
812 */
813 cond_resched();
799 } 814 }
800 mutex_unlock(&ctx->uring_lock); 815 mutex_unlock(&ctx->uring_lock);
801} 816}
@@ -803,11 +818,42 @@ static void io_iopoll_reap_events(struct io_ring_ctx *ctx)
803static int io_iopoll_check(struct io_ring_ctx *ctx, unsigned *nr_events, 818static int io_iopoll_check(struct io_ring_ctx *ctx, unsigned *nr_events,
804 long min) 819 long min)
805{ 820{
806 int ret = 0; 821 int iters, ret = 0;
822
823 /*
824 * We disallow the app entering submit/complete with polling, but we
825 * still need to lock the ring to prevent racing with polled issue
826 * that got punted to a workqueue.
827 */
828 mutex_lock(&ctx->uring_lock);
807 829
830 iters = 0;
808 do { 831 do {
809 int tmin = 0; 832 int tmin = 0;
810 833
834 /*
835 * Don't enter poll loop if we already have events pending.
836 * If we do, we can potentially be spinning for commands that
837 * already triggered a CQE (eg in error).
838 */
839 if (io_cqring_events(ctx->cq_ring))
840 break;
841
842 /*
843 * If a submit got punted to a workqueue, we can have the
844 * application entering polling for a command before it gets
845 * issued. That app will hold the uring_lock for the duration
846 * of the poll right here, so we need to take a breather every
847 * now and then to ensure that the issue has a chance to add
848 * the poll to the issued list. Otherwise we can spin here
849 * forever, while the workqueue is stuck trying to acquire the
850 * very same mutex.
851 */
852 if (!(++iters & 7)) {
853 mutex_unlock(&ctx->uring_lock);
854 mutex_lock(&ctx->uring_lock);
855 }
856
811 if (*nr_events < min) 857 if (*nr_events < min)
812 tmin = min - *nr_events; 858 tmin = min - *nr_events;
813 859
@@ -817,6 +863,7 @@ static int io_iopoll_check(struct io_ring_ctx *ctx, unsigned *nr_events,
817 ret = 0; 863 ret = 0;
818 } while (min && !*nr_events && !need_resched()); 864 } while (min && !*nr_events && !need_resched());
819 865
866 mutex_unlock(&ctx->uring_lock);
820 return ret; 867 return ret;
821} 868}
822 869
@@ -1064,8 +1111,42 @@ static int io_import_fixed(struct io_ring_ctx *ctx, int rw,
1064 */ 1111 */
1065 offset = buf_addr - imu->ubuf; 1112 offset = buf_addr - imu->ubuf;
1066 iov_iter_bvec(iter, rw, imu->bvec, imu->nr_bvecs, offset + len); 1113 iov_iter_bvec(iter, rw, imu->bvec, imu->nr_bvecs, offset + len);
1067 if (offset) 1114
1068 iov_iter_advance(iter, offset); 1115 if (offset) {
1116 /*
1117 * Don't use iov_iter_advance() here, as it's really slow for
1118 * using the latter parts of a big fixed buffer - it iterates
1119 * over each segment manually. We can cheat a bit here, because
1120 * we know that:
1121 *
1122 * 1) it's a BVEC iter, we set it up
1123 * 2) all bvecs are PAGE_SIZE in size, except potentially the
1124 * first and last bvec
1125 *
1126 * So just find our index, and adjust the iterator afterwards.
1127 * If the offset is within the first bvec (or the whole first
1128 * bvec, just use iov_iter_advance(). This makes it easier
1129 * since we can just skip the first segment, which may not
1130 * be PAGE_SIZE aligned.
1131 */
1132 const struct bio_vec *bvec = imu->bvec;
1133
1134 if (offset <= bvec->bv_len) {
1135 iov_iter_advance(iter, offset);
1136 } else {
1137 unsigned long seg_skip;
1138
1139 /* skip first vec */
1140 offset -= bvec->bv_len;
1141 seg_skip = 1 + (offset >> PAGE_SHIFT);
1142
1143 iter->bvec = bvec + seg_skip;
1144 iter->nr_segs -= seg_skip;
1145 iter->count -= bvec->bv_len + offset;
1146 iter->iov_offset = offset & ~PAGE_MASK;
1147 }
1148 }
1149
1069 return 0; 1150 return 0;
1070} 1151}
1071 1152
@@ -1120,28 +1201,26 @@ static void io_async_list_note(int rw, struct io_kiocb *req, size_t len)
1120 off_t io_end = kiocb->ki_pos + len; 1201 off_t io_end = kiocb->ki_pos + len;
1121 1202
1122 if (filp == async_list->file && kiocb->ki_pos == async_list->io_end) { 1203 if (filp == async_list->file && kiocb->ki_pos == async_list->io_end) {
1123 unsigned long max_pages; 1204 unsigned long max_bytes;
1124 1205
1125 /* Use 8x RA size as a decent limiter for both reads/writes */ 1206 /* Use 8x RA size as a decent limiter for both reads/writes */
1126 max_pages = filp->f_ra.ra_pages; 1207 max_bytes = filp->f_ra.ra_pages << (PAGE_SHIFT + 3);
1127 if (!max_pages) 1208 if (!max_bytes)
1128 max_pages = VM_READAHEAD_PAGES; 1209 max_bytes = VM_READAHEAD_PAGES << (PAGE_SHIFT + 3);
1129 max_pages *= 8; 1210
1130 1211 /* If max len are exceeded, reset the state */
1131 /* If max pages are exceeded, reset the state */ 1212 if (async_list->io_len + len <= max_bytes) {
1132 len >>= PAGE_SHIFT;
1133 if (async_list->io_pages + len <= max_pages) {
1134 req->flags |= REQ_F_SEQ_PREV; 1213 req->flags |= REQ_F_SEQ_PREV;
1135 async_list->io_pages += len; 1214 async_list->io_len += len;
1136 } else { 1215 } else {
1137 io_end = 0; 1216 io_end = 0;
1138 async_list->io_pages = 0; 1217 async_list->io_len = 0;
1139 } 1218 }
1140 } 1219 }
1141 1220
1142 /* New file? Reset state. */ 1221 /* New file? Reset state. */
1143 if (async_list->file != filp) { 1222 if (async_list->file != filp) {
1144 async_list->io_pages = 0; 1223 async_list->io_len = 0;
1145 async_list->file = filp; 1224 async_list->file = filp;
1146 } 1225 }
1147 async_list->io_end = io_end; 1226 async_list->io_end = io_end;
@@ -1630,6 +1709,8 @@ static int io_poll_add(struct io_kiocb *req, const struct io_uring_sqe *sqe)
1630 INIT_LIST_HEAD(&poll->wait.entry); 1709 INIT_LIST_HEAD(&poll->wait.entry);
1631 init_waitqueue_func_entry(&poll->wait, io_poll_wake); 1710 init_waitqueue_func_entry(&poll->wait, io_poll_wake);
1632 1711
1712 INIT_LIST_HEAD(&req->list);
1713
1633 mask = vfs_poll(poll->file, &ipt.pt) & poll->events; 1714 mask = vfs_poll(poll->file, &ipt.pt) & poll->events;
1634 1715
1635 spin_lock_irq(&ctx->completion_lock); 1716 spin_lock_irq(&ctx->completion_lock);
@@ -1800,6 +1881,7 @@ restart:
1800 do { 1881 do {
1801 struct sqe_submit *s = &req->submit; 1882 struct sqe_submit *s = &req->submit;
1802 const struct io_uring_sqe *sqe = s->sqe; 1883 const struct io_uring_sqe *sqe = s->sqe;
1884 unsigned int flags = req->flags;
1803 1885
1804 /* Ensure we clear previously set non-block flag */ 1886 /* Ensure we clear previously set non-block flag */
1805 req->rw.ki_flags &= ~IOCB_NOWAIT; 1887 req->rw.ki_flags &= ~IOCB_NOWAIT;
@@ -1844,6 +1926,10 @@ restart:
1844 /* async context always use a copy of the sqe */ 1926 /* async context always use a copy of the sqe */
1845 kfree(sqe); 1927 kfree(sqe);
1846 1928
1929 /* req from defer and link list needn't decrease async cnt */
1930 if (flags & (REQ_F_IO_DRAINED | REQ_F_LINK_DONE))
1931 goto out;
1932
1847 if (!async_list) 1933 if (!async_list)
1848 break; 1934 break;
1849 if (!list_empty(&req_list)) { 1935 if (!list_empty(&req_list)) {
@@ -1891,6 +1977,7 @@ restart:
1891 } 1977 }
1892 } 1978 }
1893 1979
1980out:
1894 if (cur_mm) { 1981 if (cur_mm) {
1895 set_fs(old_fs); 1982 set_fs(old_fs);
1896 unuse_mm(cur_mm); 1983 unuse_mm(cur_mm);
@@ -1917,6 +2004,10 @@ static bool io_add_to_prev_work(struct async_list *list, struct io_kiocb *req)
1917 ret = true; 2004 ret = true;
1918 spin_lock(&list->lock); 2005 spin_lock(&list->lock);
1919 list_add_tail(&req->list, &list->list); 2006 list_add_tail(&req->list, &list->list);
2007 /*
2008 * Ensure we see a simultaneous modification from io_sq_wq_submit_work()
2009 */
2010 smp_mb();
1920 if (!atomic_read(&list->cnt)) { 2011 if (!atomic_read(&list->cnt)) {
1921 list_del_init(&req->list); 2012 list_del_init(&req->list);
1922 ret = false; 2013 ret = false;
@@ -1977,6 +2068,15 @@ static int io_queue_sqe(struct io_ring_ctx *ctx, struct io_kiocb *req,
1977{ 2068{
1978 int ret; 2069 int ret;
1979 2070
2071 ret = io_req_defer(ctx, req, s->sqe);
2072 if (ret) {
2073 if (ret != -EIOCBQUEUED) {
2074 io_free_req(req);
2075 io_cqring_add_event(ctx, s->sqe->user_data, ret);
2076 }
2077 return 0;
2078 }
2079
1980 ret = __io_submit_sqe(ctx, req, s, true); 2080 ret = __io_submit_sqe(ctx, req, s, true);
1981 if (ret == -EAGAIN && !(req->flags & REQ_F_NOWAIT)) { 2081 if (ret == -EAGAIN && !(req->flags & REQ_F_NOWAIT)) {
1982 struct io_uring_sqe *sqe_copy; 2082 struct io_uring_sqe *sqe_copy;
@@ -2049,13 +2149,6 @@ err:
2049 return; 2149 return;
2050 } 2150 }
2051 2151
2052 ret = io_req_defer(ctx, req, s->sqe);
2053 if (ret) {
2054 if (ret != -EIOCBQUEUED)
2055 goto err_req;
2056 return;
2057 }
2058
2059 /* 2152 /*
2060 * If we already have a head request, queue this one for async 2153 * If we already have a head request, queue this one for async
2061 * submittal once the head completes. If we don't have a head but 2154 * submittal once the head completes. If we don't have a head but
@@ -2232,15 +2325,7 @@ static int io_sq_thread(void *data)
2232 unsigned nr_events = 0; 2325 unsigned nr_events = 0;
2233 2326
2234 if (ctx->flags & IORING_SETUP_IOPOLL) { 2327 if (ctx->flags & IORING_SETUP_IOPOLL) {
2235 /*
2236 * We disallow the app entering submit/complete
2237 * with polling, but we still need to lock the
2238 * ring to prevent racing with polled issue
2239 * that got punted to a workqueue.
2240 */
2241 mutex_lock(&ctx->uring_lock);
2242 io_iopoll_check(ctx, &nr_events, 0); 2328 io_iopoll_check(ctx, &nr_events, 0);
2243 mutex_unlock(&ctx->uring_lock);
2244 } else { 2329 } else {
2245 /* 2330 /*
2246 * Normal IO, just pretend everything completed. 2331 * Normal IO, just pretend everything completed.
@@ -2385,13 +2470,6 @@ static int io_ring_submit(struct io_ring_ctx *ctx, unsigned int to_submit)
2385 return submit; 2470 return submit;
2386} 2471}
2387 2472
2388static unsigned io_cqring_events(struct io_cq_ring *ring)
2389{
2390 /* See comment at the top of this file */
2391 smp_rmb();
2392 return READ_ONCE(ring->r.tail) - READ_ONCE(ring->r.head);
2393}
2394
2395/* 2473/*
2396 * Wait until events become available, if we don't already have some. The 2474 * Wait until events become available, if we don't already have some. The
2397 * application must reap them itself, as they reside on the shared cq ring. 2475 * application must reap them itself, as they reside on the shared cq ring.
@@ -3142,9 +3220,7 @@ SYSCALL_DEFINE6(io_uring_enter, unsigned int, fd, u32, to_submit,
3142 min_complete = min(min_complete, ctx->cq_entries); 3220 min_complete = min(min_complete, ctx->cq_entries);
3143 3221
3144 if (ctx->flags & IORING_SETUP_IOPOLL) { 3222 if (ctx->flags & IORING_SETUP_IOPOLL) {
3145 mutex_lock(&ctx->uring_lock);
3146 ret = io_iopoll_check(ctx, &nr_events, min_complete); 3223 ret = io_iopoll_check(ctx, &nr_events, min_complete);
3147 mutex_unlock(&ctx->uring_lock);
3148 } else { 3224 } else {
3149 ret = io_cqring_wait(ctx, min_complete, sig, sigsz); 3225 ret = io_cqring_wait(ctx, min_complete, sig, sigsz);
3150 } 3226 }
diff --git a/fs/iomap/Makefile b/fs/iomap/Makefile
index 2d165388d952..93cd11938bf5 100644
--- a/fs/iomap/Makefile
+++ b/fs/iomap/Makefile
@@ -1,4 +1,4 @@
1# SPDX-License-Identifier: GPL-2.0-or-newer 1# SPDX-License-Identifier: GPL-2.0-or-later
2# 2#
3# Copyright (c) 2019 Oracle. 3# Copyright (c) 2019 Oracle.
4# All Rights Reserved. 4# All Rights Reserved.
diff --git a/fs/namespace.c b/fs/namespace.c
index 6464ea4acba9..d28d30b13043 100644
--- a/fs/namespace.c
+++ b/fs/namespace.c
@@ -1463,7 +1463,6 @@ static void umount_tree(struct mount *mnt, enum umount_tree_flags how)
1463 p->mnt.mnt_flags |= MNT_SYNC_UMOUNT; 1463 p->mnt.mnt_flags |= MNT_SYNC_UMOUNT;
1464 1464
1465 disconnect = disconnect_mount(p, how); 1465 disconnect = disconnect_mount(p, how);
1466
1467 if (mnt_has_parent(p)) { 1466 if (mnt_has_parent(p)) {
1468 mnt_add_count(p->mnt_parent, -1); 1467 mnt_add_count(p->mnt_parent, -1);
1469 if (!disconnect) { 1468 if (!disconnect) {
@@ -1471,10 +1470,11 @@ static void umount_tree(struct mount *mnt, enum umount_tree_flags how)
1471 list_add_tail(&p->mnt_child, &p->mnt_parent->mnt_mounts); 1470 list_add_tail(&p->mnt_child, &p->mnt_parent->mnt_mounts);
1472 } else { 1471 } else {
1473 umount_mnt(p); 1472 umount_mnt(p);
1474 hlist_add_head(&p->mnt_umount, &unmounted);
1475 } 1473 }
1476 } 1474 }
1477 change_mnt_propagation(p, MS_PRIVATE); 1475 change_mnt_propagation(p, MS_PRIVATE);
1476 if (disconnect)
1477 hlist_add_head(&p->mnt_umount, &unmounted);
1478 } 1478 }
1479} 1479}
1480 1480
diff --git a/fs/nfs/delegation.c b/fs/nfs/delegation.c
index 0ff3facf81da..071b90a45933 100644
--- a/fs/nfs/delegation.c
+++ b/fs/nfs/delegation.c
@@ -153,7 +153,7 @@ again:
153 /* Block nfs4_proc_unlck */ 153 /* Block nfs4_proc_unlck */
154 mutex_lock(&sp->so_delegreturn_mutex); 154 mutex_lock(&sp->so_delegreturn_mutex);
155 seq = raw_seqcount_begin(&sp->so_reclaim_seqcount); 155 seq = raw_seqcount_begin(&sp->so_reclaim_seqcount);
156 err = nfs4_open_delegation_recall(ctx, state, stateid, type); 156 err = nfs4_open_delegation_recall(ctx, state, stateid);
157 if (!err) 157 if (!err)
158 err = nfs_delegation_claim_locks(state, stateid); 158 err = nfs_delegation_claim_locks(state, stateid);
159 if (!err && read_seqcount_retry(&sp->so_reclaim_seqcount, seq)) 159 if (!err && read_seqcount_retry(&sp->so_reclaim_seqcount, seq))
@@ -1046,6 +1046,22 @@ void nfs_test_expired_all_delegations(struct nfs_client *clp)
1046 nfs4_schedule_state_manager(clp); 1046 nfs4_schedule_state_manager(clp);
1047} 1047}
1048 1048
1049static void
1050nfs_delegation_test_free_expired(struct inode *inode,
1051 nfs4_stateid *stateid,
1052 const struct cred *cred)
1053{
1054 struct nfs_server *server = NFS_SERVER(inode);
1055 const struct nfs4_minor_version_ops *ops = server->nfs_client->cl_mvops;
1056 int status;
1057
1058 if (!cred)
1059 return;
1060 status = ops->test_and_free_expired(server, stateid, cred);
1061 if (status == -NFS4ERR_EXPIRED || status == -NFS4ERR_BAD_STATEID)
1062 nfs_remove_bad_delegation(inode, stateid);
1063}
1064
1049/** 1065/**
1050 * nfs_reap_expired_delegations - reap expired delegations 1066 * nfs_reap_expired_delegations - reap expired delegations
1051 * @clp: nfs_client to process 1067 * @clp: nfs_client to process
@@ -1057,7 +1073,6 @@ void nfs_test_expired_all_delegations(struct nfs_client *clp)
1057 */ 1073 */
1058void nfs_reap_expired_delegations(struct nfs_client *clp) 1074void nfs_reap_expired_delegations(struct nfs_client *clp)
1059{ 1075{
1060 const struct nfs4_minor_version_ops *ops = clp->cl_mvops;
1061 struct nfs_delegation *delegation; 1076 struct nfs_delegation *delegation;
1062 struct nfs_server *server; 1077 struct nfs_server *server;
1063 struct inode *inode; 1078 struct inode *inode;
@@ -1088,11 +1103,7 @@ restart:
1088 nfs4_stateid_copy(&stateid, &delegation->stateid); 1103 nfs4_stateid_copy(&stateid, &delegation->stateid);
1089 clear_bit(NFS_DELEGATION_TEST_EXPIRED, &delegation->flags); 1104 clear_bit(NFS_DELEGATION_TEST_EXPIRED, &delegation->flags);
1090 rcu_read_unlock(); 1105 rcu_read_unlock();
1091 if (cred != NULL && 1106 nfs_delegation_test_free_expired(inode, &stateid, cred);
1092 ops->test_and_free_expired(server, &stateid, cred) < 0) {
1093 nfs_revoke_delegation(inode, &stateid);
1094 nfs_inode_find_state_and_recover(inode, &stateid);
1095 }
1096 put_cred(cred); 1107 put_cred(cred);
1097 if (nfs4_server_rebooted(clp)) { 1108 if (nfs4_server_rebooted(clp)) {
1098 nfs_inode_mark_test_expired_delegation(server,inode); 1109 nfs_inode_mark_test_expired_delegation(server,inode);
diff --git a/fs/nfs/delegation.h b/fs/nfs/delegation.h
index 5799777df5ec..9eb87ae4c982 100644
--- a/fs/nfs/delegation.h
+++ b/fs/nfs/delegation.h
@@ -63,7 +63,7 @@ void nfs_reap_expired_delegations(struct nfs_client *clp);
63 63
64/* NFSv4 delegation-related procedures */ 64/* NFSv4 delegation-related procedures */
65int nfs4_proc_delegreturn(struct inode *inode, const struct cred *cred, const nfs4_stateid *stateid, int issync); 65int nfs4_proc_delegreturn(struct inode *inode, const struct cred *cred, const nfs4_stateid *stateid, int issync);
66int nfs4_open_delegation_recall(struct nfs_open_context *ctx, struct nfs4_state *state, const nfs4_stateid *stateid, fmode_t type); 66int nfs4_open_delegation_recall(struct nfs_open_context *ctx, struct nfs4_state *state, const nfs4_stateid *stateid);
67int nfs4_lock_delegation_recall(struct file_lock *fl, struct nfs4_state *state, const nfs4_stateid *stateid); 67int nfs4_lock_delegation_recall(struct file_lock *fl, struct nfs4_state *state, const nfs4_stateid *stateid);
68bool nfs4_copy_delegation_stateid(struct inode *inode, fmode_t flags, nfs4_stateid *dst, const struct cred **cred); 68bool nfs4_copy_delegation_stateid(struct inode *inode, fmode_t flags, nfs4_stateid *dst, const struct cred **cred);
69bool nfs4_refresh_delegation_stateid(nfs4_stateid *dst, struct inode *inode); 69bool nfs4_refresh_delegation_stateid(nfs4_stateid *dst, struct inode *inode);
diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c
index 8d501093660f..0adfd8840110 100644
--- a/fs/nfs/dir.c
+++ b/fs/nfs/dir.c
@@ -1487,7 +1487,7 @@ static int nfs_finish_open(struct nfs_open_context *ctx,
1487 if (S_ISREG(file->f_path.dentry->d_inode->i_mode)) 1487 if (S_ISREG(file->f_path.dentry->d_inode->i_mode))
1488 nfs_file_set_open_context(file, ctx); 1488 nfs_file_set_open_context(file, ctx);
1489 else 1489 else
1490 err = -ESTALE; 1490 err = -EOPENSTALE;
1491out: 1491out:
1492 return err; 1492 return err;
1493} 1493}
diff --git a/fs/nfs/direct.c b/fs/nfs/direct.c
index 0cb442406168..222d7115db71 100644
--- a/fs/nfs/direct.c
+++ b/fs/nfs/direct.c
@@ -401,15 +401,21 @@ static void nfs_direct_read_completion(struct nfs_pgio_header *hdr)
401 unsigned long bytes = 0; 401 unsigned long bytes = 0;
402 struct nfs_direct_req *dreq = hdr->dreq; 402 struct nfs_direct_req *dreq = hdr->dreq;
403 403
404 if (test_bit(NFS_IOHDR_REDO, &hdr->flags))
405 goto out_put;
406
407 spin_lock(&dreq->lock); 404 spin_lock(&dreq->lock);
408 if (test_bit(NFS_IOHDR_ERROR, &hdr->flags) && (hdr->good_bytes == 0)) 405 if (test_bit(NFS_IOHDR_ERROR, &hdr->flags))
409 dreq->error = hdr->error; 406 dreq->error = hdr->error;
410 else 407
408 if (test_bit(NFS_IOHDR_REDO, &hdr->flags)) {
409 spin_unlock(&dreq->lock);
410 goto out_put;
411 }
412
413 if (hdr->good_bytes != 0)
411 nfs_direct_good_bytes(dreq, hdr); 414 nfs_direct_good_bytes(dreq, hdr);
412 415
416 if (test_bit(NFS_IOHDR_EOF, &hdr->flags))
417 dreq->error = 0;
418
413 spin_unlock(&dreq->lock); 419 spin_unlock(&dreq->lock);
414 420
415 while (!list_empty(&hdr->pages)) { 421 while (!list_empty(&hdr->pages)) {
@@ -782,16 +788,19 @@ static void nfs_direct_write_completion(struct nfs_pgio_header *hdr)
782 bool request_commit = false; 788 bool request_commit = false;
783 struct nfs_page *req = nfs_list_entry(hdr->pages.next); 789 struct nfs_page *req = nfs_list_entry(hdr->pages.next);
784 790
785 if (test_bit(NFS_IOHDR_REDO, &hdr->flags))
786 goto out_put;
787
788 nfs_init_cinfo_from_dreq(&cinfo, dreq); 791 nfs_init_cinfo_from_dreq(&cinfo, dreq);
789 792
790 spin_lock(&dreq->lock); 793 spin_lock(&dreq->lock);
791 794
792 if (test_bit(NFS_IOHDR_ERROR, &hdr->flags)) 795 if (test_bit(NFS_IOHDR_ERROR, &hdr->flags))
793 dreq->error = hdr->error; 796 dreq->error = hdr->error;
794 if (dreq->error == 0) { 797
798 if (test_bit(NFS_IOHDR_REDO, &hdr->flags)) {
799 spin_unlock(&dreq->lock);
800 goto out_put;
801 }
802
803 if (hdr->good_bytes != 0) {
795 nfs_direct_good_bytes(dreq, hdr); 804 nfs_direct_good_bytes(dreq, hdr);
796 if (nfs_write_need_commit(hdr)) { 805 if (nfs_write_need_commit(hdr)) {
797 if (dreq->flags == NFS_ODIRECT_RESCHED_WRITES) 806 if (dreq->flags == NFS_ODIRECT_RESCHED_WRITES)
diff --git a/fs/nfs/flexfilelayout/flexfilelayout.c b/fs/nfs/flexfilelayout/flexfilelayout.c
index b04e20d28162..5657b7f2611f 100644
--- a/fs/nfs/flexfilelayout/flexfilelayout.c
+++ b/fs/nfs/flexfilelayout/flexfilelayout.c
@@ -8,6 +8,7 @@
8 */ 8 */
9 9
10#include <linux/nfs_fs.h> 10#include <linux/nfs_fs.h>
11#include <linux/nfs_mount.h>
11#include <linux/nfs_page.h> 12#include <linux/nfs_page.h>
12#include <linux/module.h> 13#include <linux/module.h>
13#include <linux/sched/mm.h> 14#include <linux/sched/mm.h>
@@ -928,7 +929,9 @@ retry:
928 pgm = &pgio->pg_mirrors[0]; 929 pgm = &pgio->pg_mirrors[0];
929 pgm->pg_bsize = mirror->mirror_ds->ds_versions[0].rsize; 930 pgm->pg_bsize = mirror->mirror_ds->ds_versions[0].rsize;
930 931
931 pgio->pg_maxretrans = io_maxretrans; 932 if (NFS_SERVER(pgio->pg_inode)->flags &
933 (NFS_MOUNT_SOFT|NFS_MOUNT_SOFTERR))
934 pgio->pg_maxretrans = io_maxretrans;
932 return; 935 return;
933out_nolseg: 936out_nolseg:
934 if (pgio->pg_error < 0) 937 if (pgio->pg_error < 0)
@@ -940,6 +943,7 @@ out_mds:
940 pgio->pg_lseg); 943 pgio->pg_lseg);
941 pnfs_put_lseg(pgio->pg_lseg); 944 pnfs_put_lseg(pgio->pg_lseg);
942 pgio->pg_lseg = NULL; 945 pgio->pg_lseg = NULL;
946 pgio->pg_maxretrans = 0;
943 nfs_pageio_reset_read_mds(pgio); 947 nfs_pageio_reset_read_mds(pgio);
944} 948}
945 949
@@ -1000,7 +1004,9 @@ retry:
1000 pgm->pg_bsize = mirror->mirror_ds->ds_versions[0].wsize; 1004 pgm->pg_bsize = mirror->mirror_ds->ds_versions[0].wsize;
1001 } 1005 }
1002 1006
1003 pgio->pg_maxretrans = io_maxretrans; 1007 if (NFS_SERVER(pgio->pg_inode)->flags &
1008 (NFS_MOUNT_SOFT|NFS_MOUNT_SOFTERR))
1009 pgio->pg_maxretrans = io_maxretrans;
1004 return; 1010 return;
1005 1011
1006out_mds: 1012out_mds:
@@ -1010,6 +1016,7 @@ out_mds:
1010 pgio->pg_lseg); 1016 pgio->pg_lseg);
1011 pnfs_put_lseg(pgio->pg_lseg); 1017 pnfs_put_lseg(pgio->pg_lseg);
1012 pgio->pg_lseg = NULL; 1018 pgio->pg_lseg = NULL;
1019 pgio->pg_maxretrans = 0;
1013 nfs_pageio_reset_write_mds(pgio); 1020 nfs_pageio_reset_write_mds(pgio);
1014} 1021}
1015 1022
@@ -1148,8 +1155,6 @@ static int ff_layout_async_handle_error_v4(struct rpc_task *task,
1148 break; 1155 break;
1149 case -NFS4ERR_RETRY_UNCACHED_REP: 1156 case -NFS4ERR_RETRY_UNCACHED_REP:
1150 break; 1157 break;
1151 case -EAGAIN:
1152 return -NFS4ERR_RESET_TO_PNFS;
1153 /* Invalidate Layout errors */ 1158 /* Invalidate Layout errors */
1154 case -NFS4ERR_PNFS_NO_LAYOUT: 1159 case -NFS4ERR_PNFS_NO_LAYOUT:
1155 case -ESTALE: /* mapped NFS4ERR_STALE */ 1160 case -ESTALE: /* mapped NFS4ERR_STALE */
@@ -1210,7 +1215,6 @@ static int ff_layout_async_handle_error_v3(struct rpc_task *task,
1210 case -EBADHANDLE: 1215 case -EBADHANDLE:
1211 case -ELOOP: 1216 case -ELOOP:
1212 case -ENOSPC: 1217 case -ENOSPC:
1213 case -EAGAIN:
1214 break; 1218 break;
1215 case -EJUKEBOX: 1219 case -EJUKEBOX:
1216 nfs_inc_stats(lseg->pls_layout->plh_inode, NFSIOS_DELAY); 1220 nfs_inc_stats(lseg->pls_layout->plh_inode, NFSIOS_DELAY);
@@ -1445,16 +1449,6 @@ static void ff_layout_read_prepare_v4(struct rpc_task *task, void *data)
1445 ff_layout_read_prepare_common(task, hdr); 1449 ff_layout_read_prepare_common(task, hdr);
1446} 1450}
1447 1451
1448static void
1449ff_layout_io_prepare_transmit(struct rpc_task *task,
1450 void *data)
1451{
1452 struct nfs_pgio_header *hdr = data;
1453
1454 if (!pnfs_is_valid_lseg(hdr->lseg))
1455 rpc_exit(task, -EAGAIN);
1456}
1457
1458static void ff_layout_read_call_done(struct rpc_task *task, void *data) 1452static void ff_layout_read_call_done(struct rpc_task *task, void *data)
1459{ 1453{
1460 struct nfs_pgio_header *hdr = data; 1454 struct nfs_pgio_header *hdr = data;
@@ -1740,7 +1734,6 @@ static void ff_layout_commit_release(void *data)
1740 1734
1741static const struct rpc_call_ops ff_layout_read_call_ops_v3 = { 1735static const struct rpc_call_ops ff_layout_read_call_ops_v3 = {
1742 .rpc_call_prepare = ff_layout_read_prepare_v3, 1736 .rpc_call_prepare = ff_layout_read_prepare_v3,
1743 .rpc_call_prepare_transmit = ff_layout_io_prepare_transmit,
1744 .rpc_call_done = ff_layout_read_call_done, 1737 .rpc_call_done = ff_layout_read_call_done,
1745 .rpc_count_stats = ff_layout_read_count_stats, 1738 .rpc_count_stats = ff_layout_read_count_stats,
1746 .rpc_release = ff_layout_read_release, 1739 .rpc_release = ff_layout_read_release,
@@ -1748,7 +1741,6 @@ static const struct rpc_call_ops ff_layout_read_call_ops_v3 = {
1748 1741
1749static const struct rpc_call_ops ff_layout_read_call_ops_v4 = { 1742static const struct rpc_call_ops ff_layout_read_call_ops_v4 = {
1750 .rpc_call_prepare = ff_layout_read_prepare_v4, 1743 .rpc_call_prepare = ff_layout_read_prepare_v4,
1751 .rpc_call_prepare_transmit = ff_layout_io_prepare_transmit,
1752 .rpc_call_done = ff_layout_read_call_done, 1744 .rpc_call_done = ff_layout_read_call_done,
1753 .rpc_count_stats = ff_layout_read_count_stats, 1745 .rpc_count_stats = ff_layout_read_count_stats,
1754 .rpc_release = ff_layout_read_release, 1746 .rpc_release = ff_layout_read_release,
@@ -1756,7 +1748,6 @@ static const struct rpc_call_ops ff_layout_read_call_ops_v4 = {
1756 1748
1757static const struct rpc_call_ops ff_layout_write_call_ops_v3 = { 1749static const struct rpc_call_ops ff_layout_write_call_ops_v3 = {
1758 .rpc_call_prepare = ff_layout_write_prepare_v3, 1750 .rpc_call_prepare = ff_layout_write_prepare_v3,
1759 .rpc_call_prepare_transmit = ff_layout_io_prepare_transmit,
1760 .rpc_call_done = ff_layout_write_call_done, 1751 .rpc_call_done = ff_layout_write_call_done,
1761 .rpc_count_stats = ff_layout_write_count_stats, 1752 .rpc_count_stats = ff_layout_write_count_stats,
1762 .rpc_release = ff_layout_write_release, 1753 .rpc_release = ff_layout_write_release,
@@ -1764,7 +1755,6 @@ static const struct rpc_call_ops ff_layout_write_call_ops_v3 = {
1764 1755
1765static const struct rpc_call_ops ff_layout_write_call_ops_v4 = { 1756static const struct rpc_call_ops ff_layout_write_call_ops_v4 = {
1766 .rpc_call_prepare = ff_layout_write_prepare_v4, 1757 .rpc_call_prepare = ff_layout_write_prepare_v4,
1767 .rpc_call_prepare_transmit = ff_layout_io_prepare_transmit,
1768 .rpc_call_done = ff_layout_write_call_done, 1758 .rpc_call_done = ff_layout_write_call_done,
1769 .rpc_count_stats = ff_layout_write_count_stats, 1759 .rpc_count_stats = ff_layout_write_count_stats,
1770 .rpc_release = ff_layout_write_release, 1760 .rpc_release = ff_layout_write_release,
diff --git a/fs/nfs/fscache.c b/fs/nfs/fscache.c
index 53507aa96b0b..3800ab6f08fa 100644
--- a/fs/nfs/fscache.c
+++ b/fs/nfs/fscache.c
@@ -114,6 +114,10 @@ void nfs_fscache_get_super_cookie(struct super_block *sb, const char *uniq, int
114 struct rb_node **p, *parent; 114 struct rb_node **p, *parent;
115 int diff; 115 int diff;
116 116
117 nfss->fscache_key = NULL;
118 nfss->fscache = NULL;
119 if (!(nfss->options & NFS_OPTION_FSCACHE))
120 return;
117 if (!uniq) { 121 if (!uniq) {
118 uniq = ""; 122 uniq = "";
119 ulen = 1; 123 ulen = 1;
@@ -226,10 +230,11 @@ void nfs_fscache_release_super_cookie(struct super_block *sb)
226void nfs_fscache_init_inode(struct inode *inode) 230void nfs_fscache_init_inode(struct inode *inode)
227{ 231{
228 struct nfs_fscache_inode_auxdata auxdata; 232 struct nfs_fscache_inode_auxdata auxdata;
233 struct nfs_server *nfss = NFS_SERVER(inode);
229 struct nfs_inode *nfsi = NFS_I(inode); 234 struct nfs_inode *nfsi = NFS_I(inode);
230 235
231 nfsi->fscache = NULL; 236 nfsi->fscache = NULL;
232 if (!S_ISREG(inode->i_mode)) 237 if (!(nfss->fscache && S_ISREG(inode->i_mode)))
233 return; 238 return;
234 239
235 memset(&auxdata, 0, sizeof(auxdata)); 240 memset(&auxdata, 0, sizeof(auxdata));
diff --git a/fs/nfs/fscache.h b/fs/nfs/fscache.h
index 25a75e40d91d..ad041cfbf9ec 100644
--- a/fs/nfs/fscache.h
+++ b/fs/nfs/fscache.h
@@ -182,7 +182,7 @@ static inline void nfs_fscache_wait_on_invalidate(struct inode *inode)
182 */ 182 */
183static inline const char *nfs_server_fscache_state(struct nfs_server *server) 183static inline const char *nfs_server_fscache_state(struct nfs_server *server)
184{ 184{
185 if (server->fscache && (server->options & NFS_OPTION_FSCACHE)) 185 if (server->fscache)
186 return "yes"; 186 return "yes";
187 return "no "; 187 return "no ";
188} 188}
diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
index 8a1758200b57..c764cfe456e5 100644
--- a/fs/nfs/inode.c
+++ b/fs/nfs/inode.c
@@ -1403,12 +1403,21 @@ static int nfs_check_inode_attributes(struct inode *inode, struct nfs_fattr *fat
1403 if (NFS_PROTO(inode)->have_delegation(inode, FMODE_READ)) 1403 if (NFS_PROTO(inode)->have_delegation(inode, FMODE_READ))
1404 return 0; 1404 return 0;
1405 1405
1406 /* No fileid? Just exit */
1407 if (!(fattr->valid & NFS_ATTR_FATTR_FILEID))
1408 return 0;
1406 /* Has the inode gone and changed behind our back? */ 1409 /* Has the inode gone and changed behind our back? */
1407 if ((fattr->valid & NFS_ATTR_FATTR_FILEID) && nfsi->fileid != fattr->fileid) 1410 if (nfsi->fileid != fattr->fileid) {
1411 /* Is this perhaps the mounted-on fileid? */
1412 if ((fattr->valid & NFS_ATTR_FATTR_MOUNTED_ON_FILEID) &&
1413 nfsi->fileid == fattr->mounted_on_fileid)
1414 return 0;
1408 return -ESTALE; 1415 return -ESTALE;
1416 }
1409 if ((fattr->valid & NFS_ATTR_FATTR_TYPE) && (inode->i_mode & S_IFMT) != (fattr->mode & S_IFMT)) 1417 if ((fattr->valid & NFS_ATTR_FATTR_TYPE) && (inode->i_mode & S_IFMT) != (fattr->mode & S_IFMT))
1410 return -ESTALE; 1418 return -ESTALE;
1411 1419
1420
1412 if (!nfs_file_has_buffered_writers(nfsi)) { 1421 if (!nfs_file_has_buffered_writers(nfsi)) {
1413 /* Verify a few of the more important attributes */ 1422 /* Verify a few of the more important attributes */
1414 if ((fattr->valid & NFS_ATTR_FATTR_CHANGE) != 0 && !inode_eq_iversion_raw(inode, fattr->change_attr)) 1423 if ((fattr->valid & NFS_ATTR_FATTR_CHANGE) != 0 && !inode_eq_iversion_raw(inode, fattr->change_attr))
@@ -1768,18 +1777,6 @@ int nfs_post_op_update_inode_force_wcc(struct inode *inode, struct nfs_fattr *fa
1768EXPORT_SYMBOL_GPL(nfs_post_op_update_inode_force_wcc); 1777EXPORT_SYMBOL_GPL(nfs_post_op_update_inode_force_wcc);
1769 1778
1770 1779
1771static inline bool nfs_fileid_valid(struct nfs_inode *nfsi,
1772 struct nfs_fattr *fattr)
1773{
1774 bool ret1 = true, ret2 = true;
1775
1776 if (fattr->valid & NFS_ATTR_FATTR_FILEID)
1777 ret1 = (nfsi->fileid == fattr->fileid);
1778 if (fattr->valid & NFS_ATTR_FATTR_MOUNTED_ON_FILEID)
1779 ret2 = (nfsi->fileid == fattr->mounted_on_fileid);
1780 return ret1 || ret2;
1781}
1782
1783/* 1780/*
1784 * Many nfs protocol calls return the new file attributes after 1781 * Many nfs protocol calls return the new file attributes after
1785 * an operation. Here we update the inode to reflect the state 1782 * an operation. Here we update the inode to reflect the state
@@ -1810,7 +1807,15 @@ static int nfs_update_inode(struct inode *inode, struct nfs_fattr *fattr)
1810 nfs_display_fhandle_hash(NFS_FH(inode)), 1807 nfs_display_fhandle_hash(NFS_FH(inode)),
1811 atomic_read(&inode->i_count), fattr->valid); 1808 atomic_read(&inode->i_count), fattr->valid);
1812 1809
1813 if (!nfs_fileid_valid(nfsi, fattr)) { 1810 /* No fileid? Just exit */
1811 if (!(fattr->valid & NFS_ATTR_FATTR_FILEID))
1812 return 0;
1813 /* Has the inode gone and changed behind our back? */
1814 if (nfsi->fileid != fattr->fileid) {
1815 /* Is this perhaps the mounted-on fileid? */
1816 if ((fattr->valid & NFS_ATTR_FATTR_MOUNTED_ON_FILEID) &&
1817 nfsi->fileid == fattr->mounted_on_fileid)
1818 return 0;
1814 printk(KERN_ERR "NFS: server %s error: fileid changed\n" 1819 printk(KERN_ERR "NFS: server %s error: fileid changed\n"
1815 "fsid %s: expected fileid 0x%Lx, got 0x%Lx\n", 1820 "fsid %s: expected fileid 0x%Lx, got 0x%Lx\n",
1816 NFS_SERVER(inode)->nfs_client->cl_hostname, 1821 NFS_SERVER(inode)->nfs_client->cl_hostname,
diff --git a/fs/nfs/internal.h b/fs/nfs/internal.h
index a2346a2f8361..e64f810223be 100644
--- a/fs/nfs/internal.h
+++ b/fs/nfs/internal.h
@@ -775,3 +775,13 @@ static inline bool nfs_error_is_fatal(int err)
775 } 775 }
776} 776}
777 777
778static inline bool nfs_error_is_fatal_on_server(int err)
779{
780 switch (err) {
781 case 0:
782 case -ERESTARTSYS:
783 case -EINTR:
784 return false;
785 }
786 return nfs_error_is_fatal(err);
787}
diff --git a/fs/nfs/nfs4_fs.h b/fs/nfs/nfs4_fs.h
index d778dad9a75e..3564da1ba8a1 100644
--- a/fs/nfs/nfs4_fs.h
+++ b/fs/nfs/nfs4_fs.h
@@ -465,7 +465,8 @@ static inline void nfs4_schedule_session_recovery(struct nfs4_session *session,
465 465
466extern struct nfs4_state_owner *nfs4_get_state_owner(struct nfs_server *, const struct cred *, gfp_t); 466extern struct nfs4_state_owner *nfs4_get_state_owner(struct nfs_server *, const struct cred *, gfp_t);
467extern void nfs4_put_state_owner(struct nfs4_state_owner *); 467extern void nfs4_put_state_owner(struct nfs4_state_owner *);
468extern void nfs4_purge_state_owners(struct nfs_server *); 468extern void nfs4_purge_state_owners(struct nfs_server *, struct list_head *);
469extern void nfs4_free_state_owners(struct list_head *head);
469extern struct nfs4_state * nfs4_get_open_state(struct inode *, struct nfs4_state_owner *); 470extern struct nfs4_state * nfs4_get_open_state(struct inode *, struct nfs4_state_owner *);
470extern void nfs4_put_open_state(struct nfs4_state *); 471extern void nfs4_put_open_state(struct nfs4_state *);
471extern void nfs4_close_state(struct nfs4_state *, fmode_t); 472extern void nfs4_close_state(struct nfs4_state *, fmode_t);
diff --git a/fs/nfs/nfs4client.c b/fs/nfs/nfs4client.c
index 616393a01c06..da6204025a2d 100644
--- a/fs/nfs/nfs4client.c
+++ b/fs/nfs/nfs4client.c
@@ -758,9 +758,12 @@ out:
758 758
759static void nfs4_destroy_server(struct nfs_server *server) 759static void nfs4_destroy_server(struct nfs_server *server)
760{ 760{
761 LIST_HEAD(freeme);
762
761 nfs_server_return_all_delegations(server); 763 nfs_server_return_all_delegations(server);
762 unset_pnfs_layoutdriver(server); 764 unset_pnfs_layoutdriver(server);
763 nfs4_purge_state_owners(server); 765 nfs4_purge_state_owners(server, &freeme);
766 nfs4_free_state_owners(&freeme);
764} 767}
765 768
766/* 769/*
diff --git a/fs/nfs/nfs4file.c b/fs/nfs/nfs4file.c
index 96db471ca2e5..339663d04bf8 100644
--- a/fs/nfs/nfs4file.c
+++ b/fs/nfs/nfs4file.c
@@ -73,13 +73,13 @@ nfs4_file_open(struct inode *inode, struct file *filp)
73 if (IS_ERR(inode)) { 73 if (IS_ERR(inode)) {
74 err = PTR_ERR(inode); 74 err = PTR_ERR(inode);
75 switch (err) { 75 switch (err) {
76 case -EPERM:
77 case -EACCES:
78 case -EDQUOT:
79 case -ENOSPC:
80 case -EROFS:
81 goto out_put_ctx;
82 default: 76 default:
77 goto out_put_ctx;
78 case -ENOENT:
79 case -ESTALE:
80 case -EISDIR:
81 case -ENOTDIR:
82 case -ELOOP:
83 goto out_drop; 83 goto out_drop;
84 } 84 }
85 } 85 }
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
index 39896afc6edf..1406858bae6c 100644
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
@@ -1683,6 +1683,14 @@ static void nfs_state_set_open_stateid(struct nfs4_state *state,
1683 write_sequnlock(&state->seqlock); 1683 write_sequnlock(&state->seqlock);
1684} 1684}
1685 1685
1686static void nfs_state_clear_open_state_flags(struct nfs4_state *state)
1687{
1688 clear_bit(NFS_O_RDWR_STATE, &state->flags);
1689 clear_bit(NFS_O_WRONLY_STATE, &state->flags);
1690 clear_bit(NFS_O_RDONLY_STATE, &state->flags);
1691 clear_bit(NFS_OPEN_STATE, &state->flags);
1692}
1693
1686static void nfs_state_set_delegation(struct nfs4_state *state, 1694static void nfs_state_set_delegation(struct nfs4_state *state,
1687 const nfs4_stateid *deleg_stateid, 1695 const nfs4_stateid *deleg_stateid,
1688 fmode_t fmode) 1696 fmode_t fmode)
@@ -1907,8 +1915,9 @@ _nfs4_opendata_reclaim_to_nfs4_state(struct nfs4_opendata *data)
1907 if (data->o_res.delegation_type != 0) 1915 if (data->o_res.delegation_type != 0)
1908 nfs4_opendata_check_deleg(data, state); 1916 nfs4_opendata_check_deleg(data, state);
1909update: 1917update:
1910 update_open_stateid(state, &data->o_res.stateid, NULL, 1918 if (!update_open_stateid(state, &data->o_res.stateid,
1911 data->o_arg.fmode); 1919 NULL, data->o_arg.fmode))
1920 return ERR_PTR(-EAGAIN);
1912 refcount_inc(&state->count); 1921 refcount_inc(&state->count);
1913 1922
1914 return state; 1923 return state;
@@ -1973,8 +1982,11 @@ _nfs4_opendata_to_nfs4_state(struct nfs4_opendata *data)
1973 1982
1974 if (data->o_res.delegation_type != 0) 1983 if (data->o_res.delegation_type != 0)
1975 nfs4_opendata_check_deleg(data, state); 1984 nfs4_opendata_check_deleg(data, state);
1976 update_open_stateid(state, &data->o_res.stateid, NULL, 1985 if (!update_open_stateid(state, &data->o_res.stateid,
1977 data->o_arg.fmode); 1986 NULL, data->o_arg.fmode)) {
1987 nfs4_put_open_state(state);
1988 state = ERR_PTR(-EAGAIN);
1989 }
1978out: 1990out:
1979 nfs_release_seqid(data->o_arg.seqid); 1991 nfs_release_seqid(data->o_arg.seqid);
1980 return state; 1992 return state;
@@ -2074,13 +2086,7 @@ static int nfs4_open_recover(struct nfs4_opendata *opendata, struct nfs4_state *
2074{ 2086{
2075 int ret; 2087 int ret;
2076 2088
2077 /* Don't trigger recovery in nfs_test_and_clear_all_open_stateid */
2078 clear_bit(NFS_O_RDWR_STATE, &state->flags);
2079 clear_bit(NFS_O_WRONLY_STATE, &state->flags);
2080 clear_bit(NFS_O_RDONLY_STATE, &state->flags);
2081 /* memory barrier prior to reading state->n_* */ 2089 /* memory barrier prior to reading state->n_* */
2082 clear_bit(NFS_DELEGATED_STATE, &state->flags);
2083 clear_bit(NFS_OPEN_STATE, &state->flags);
2084 smp_rmb(); 2090 smp_rmb();
2085 ret = nfs4_open_recover_helper(opendata, FMODE_READ|FMODE_WRITE); 2091 ret = nfs4_open_recover_helper(opendata, FMODE_READ|FMODE_WRITE);
2086 if (ret != 0) 2092 if (ret != 0)
@@ -2156,6 +2162,8 @@ static int nfs4_open_reclaim(struct nfs4_state_owner *sp, struct nfs4_state *sta
2156 ctx = nfs4_state_find_open_context(state); 2162 ctx = nfs4_state_find_open_context(state);
2157 if (IS_ERR(ctx)) 2163 if (IS_ERR(ctx))
2158 return -EAGAIN; 2164 return -EAGAIN;
2165 clear_bit(NFS_DELEGATED_STATE, &state->flags);
2166 nfs_state_clear_open_state_flags(state);
2159 ret = nfs4_do_open_reclaim(ctx, state); 2167 ret = nfs4_do_open_reclaim(ctx, state);
2160 put_nfs_open_context(ctx); 2168 put_nfs_open_context(ctx);
2161 return ret; 2169 return ret;
@@ -2171,18 +2179,17 @@ static int nfs4_handle_delegation_recall_error(struct nfs_server *server, struct
2171 case -ENOENT: 2179 case -ENOENT:
2172 case -EAGAIN: 2180 case -EAGAIN:
2173 case -ESTALE: 2181 case -ESTALE:
2182 case -ETIMEDOUT:
2174 break; 2183 break;
2175 case -NFS4ERR_BADSESSION: 2184 case -NFS4ERR_BADSESSION:
2176 case -NFS4ERR_BADSLOT: 2185 case -NFS4ERR_BADSLOT:
2177 case -NFS4ERR_BAD_HIGH_SLOT: 2186 case -NFS4ERR_BAD_HIGH_SLOT:
2178 case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION: 2187 case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
2179 case -NFS4ERR_DEADSESSION: 2188 case -NFS4ERR_DEADSESSION:
2180 set_bit(NFS_DELEGATED_STATE, &state->flags);
2181 nfs4_schedule_session_recovery(server->nfs_client->cl_session, err); 2189 nfs4_schedule_session_recovery(server->nfs_client->cl_session, err);
2182 return -EAGAIN; 2190 return -EAGAIN;
2183 case -NFS4ERR_STALE_CLIENTID: 2191 case -NFS4ERR_STALE_CLIENTID:
2184 case -NFS4ERR_STALE_STATEID: 2192 case -NFS4ERR_STALE_STATEID:
2185 set_bit(NFS_DELEGATED_STATE, &state->flags);
2186 /* Don't recall a delegation if it was lost */ 2193 /* Don't recall a delegation if it was lost */
2187 nfs4_schedule_lease_recovery(server->nfs_client); 2194 nfs4_schedule_lease_recovery(server->nfs_client);
2188 return -EAGAIN; 2195 return -EAGAIN;
@@ -2203,7 +2210,6 @@ static int nfs4_handle_delegation_recall_error(struct nfs_server *server, struct
2203 return -EAGAIN; 2210 return -EAGAIN;
2204 case -NFS4ERR_DELAY: 2211 case -NFS4ERR_DELAY:
2205 case -NFS4ERR_GRACE: 2212 case -NFS4ERR_GRACE:
2206 set_bit(NFS_DELEGATED_STATE, &state->flags);
2207 ssleep(1); 2213 ssleep(1);
2208 return -EAGAIN; 2214 return -EAGAIN;
2209 case -ENOMEM: 2215 case -ENOMEM:
@@ -2219,8 +2225,7 @@ static int nfs4_handle_delegation_recall_error(struct nfs_server *server, struct
2219} 2225}
2220 2226
2221int nfs4_open_delegation_recall(struct nfs_open_context *ctx, 2227int nfs4_open_delegation_recall(struct nfs_open_context *ctx,
2222 struct nfs4_state *state, const nfs4_stateid *stateid, 2228 struct nfs4_state *state, const nfs4_stateid *stateid)
2223 fmode_t type)
2224{ 2229{
2225 struct nfs_server *server = NFS_SERVER(state->inode); 2230 struct nfs_server *server = NFS_SERVER(state->inode);
2226 struct nfs4_opendata *opendata; 2231 struct nfs4_opendata *opendata;
@@ -2231,20 +2236,23 @@ int nfs4_open_delegation_recall(struct nfs_open_context *ctx,
2231 if (IS_ERR(opendata)) 2236 if (IS_ERR(opendata))
2232 return PTR_ERR(opendata); 2237 return PTR_ERR(opendata);
2233 nfs4_stateid_copy(&opendata->o_arg.u.delegation, stateid); 2238 nfs4_stateid_copy(&opendata->o_arg.u.delegation, stateid);
2234 nfs_state_clear_delegation(state); 2239 if (!test_bit(NFS_O_RDWR_STATE, &state->flags)) {
2235 switch (type & (FMODE_READ|FMODE_WRITE)) {
2236 case FMODE_READ|FMODE_WRITE:
2237 case FMODE_WRITE:
2238 err = nfs4_open_recover_helper(opendata, FMODE_READ|FMODE_WRITE); 2240 err = nfs4_open_recover_helper(opendata, FMODE_READ|FMODE_WRITE);
2239 if (err) 2241 if (err)
2240 break; 2242 goto out;
2243 }
2244 if (!test_bit(NFS_O_WRONLY_STATE, &state->flags)) {
2241 err = nfs4_open_recover_helper(opendata, FMODE_WRITE); 2245 err = nfs4_open_recover_helper(opendata, FMODE_WRITE);
2242 if (err) 2246 if (err)
2243 break; 2247 goto out;
2244 /* Fall through */ 2248 }
2245 case FMODE_READ: 2249 if (!test_bit(NFS_O_RDONLY_STATE, &state->flags)) {
2246 err = nfs4_open_recover_helper(opendata, FMODE_READ); 2250 err = nfs4_open_recover_helper(opendata, FMODE_READ);
2251 if (err)
2252 goto out;
2247 } 2253 }
2254 nfs_state_clear_delegation(state);
2255out:
2248 nfs4_opendata_put(opendata); 2256 nfs4_opendata_put(opendata);
2249 return nfs4_handle_delegation_recall_error(server, state, stateid, NULL, err); 2257 return nfs4_handle_delegation_recall_error(server, state, stateid, NULL, err);
2250} 2258}
@@ -2492,6 +2500,7 @@ static int nfs4_run_open_task(struct nfs4_opendata *data,
2492 if (!ctx) { 2500 if (!ctx) {
2493 nfs4_init_sequence(&o_arg->seq_args, &o_res->seq_res, 1, 1); 2501 nfs4_init_sequence(&o_arg->seq_args, &o_res->seq_res, 1, 1);
2494 data->is_recover = true; 2502 data->is_recover = true;
2503 task_setup_data.flags |= RPC_TASK_TIMEOUT;
2495 } else { 2504 } else {
2496 nfs4_init_sequence(&o_arg->seq_args, &o_res->seq_res, 1, 0); 2505 nfs4_init_sequence(&o_arg->seq_args, &o_res->seq_res, 1, 0);
2497 pnfs_lgopen_prepare(data, ctx); 2506 pnfs_lgopen_prepare(data, ctx);
@@ -2698,6 +2707,7 @@ static int nfs40_open_expired(struct nfs4_state_owner *sp, struct nfs4_state *st
2698{ 2707{
2699 /* NFSv4.0 doesn't allow for delegation recovery on open expire */ 2708 /* NFSv4.0 doesn't allow for delegation recovery on open expire */
2700 nfs40_clear_delegation_stateid(state); 2709 nfs40_clear_delegation_stateid(state);
2710 nfs_state_clear_open_state_flags(state);
2701 return nfs4_open_expired(sp, state); 2711 return nfs4_open_expired(sp, state);
2702} 2712}
2703 2713
@@ -2740,13 +2750,13 @@ out_free:
2740 return -NFS4ERR_EXPIRED; 2750 return -NFS4ERR_EXPIRED;
2741} 2751}
2742 2752
2743static void nfs41_check_delegation_stateid(struct nfs4_state *state) 2753static int nfs41_check_delegation_stateid(struct nfs4_state *state)
2744{ 2754{
2745 struct nfs_server *server = NFS_SERVER(state->inode); 2755 struct nfs_server *server = NFS_SERVER(state->inode);
2746 nfs4_stateid stateid; 2756 nfs4_stateid stateid;
2747 struct nfs_delegation *delegation; 2757 struct nfs_delegation *delegation;
2748 const struct cred *cred = NULL; 2758 const struct cred *cred = NULL;
2749 int status; 2759 int status, ret = NFS_OK;
2750 2760
2751 /* Get the delegation credential for use by test/free_stateid */ 2761 /* Get the delegation credential for use by test/free_stateid */
2752 rcu_read_lock(); 2762 rcu_read_lock();
@@ -2754,20 +2764,15 @@ static void nfs41_check_delegation_stateid(struct nfs4_state *state)
2754 if (delegation == NULL) { 2764 if (delegation == NULL) {
2755 rcu_read_unlock(); 2765 rcu_read_unlock();
2756 nfs_state_clear_delegation(state); 2766 nfs_state_clear_delegation(state);
2757 return; 2767 return NFS_OK;
2758 } 2768 }
2759 2769
2760 nfs4_stateid_copy(&stateid, &delegation->stateid); 2770 nfs4_stateid_copy(&stateid, &delegation->stateid);
2761 if (test_bit(NFS_DELEGATION_REVOKED, &delegation->flags)) {
2762 rcu_read_unlock();
2763 nfs_state_clear_delegation(state);
2764 return;
2765 }
2766 2771
2767 if (!test_and_clear_bit(NFS_DELEGATION_TEST_EXPIRED, 2772 if (!test_and_clear_bit(NFS_DELEGATION_TEST_EXPIRED,
2768 &delegation->flags)) { 2773 &delegation->flags)) {
2769 rcu_read_unlock(); 2774 rcu_read_unlock();
2770 return; 2775 return NFS_OK;
2771 } 2776 }
2772 2777
2773 if (delegation->cred) 2778 if (delegation->cred)
@@ -2777,9 +2782,24 @@ static void nfs41_check_delegation_stateid(struct nfs4_state *state)
2777 trace_nfs4_test_delegation_stateid(state, NULL, status); 2782 trace_nfs4_test_delegation_stateid(state, NULL, status);
2778 if (status == -NFS4ERR_EXPIRED || status == -NFS4ERR_BAD_STATEID) 2783 if (status == -NFS4ERR_EXPIRED || status == -NFS4ERR_BAD_STATEID)
2779 nfs_finish_clear_delegation_stateid(state, &stateid); 2784 nfs_finish_clear_delegation_stateid(state, &stateid);
2785 else
2786 ret = status;
2780 2787
2781 if (delegation->cred) 2788 put_cred(cred);
2782 put_cred(cred); 2789 return ret;
2790}
2791
2792static void nfs41_delegation_recover_stateid(struct nfs4_state *state)
2793{
2794 nfs4_stateid tmp;
2795
2796 if (test_bit(NFS_DELEGATED_STATE, &state->flags) &&
2797 nfs4_copy_delegation_stateid(state->inode, state->state,
2798 &tmp, NULL) &&
2799 nfs4_stateid_match_other(&state->stateid, &tmp))
2800 nfs_state_set_delegation(state, &tmp, state->state);
2801 else
2802 nfs_state_clear_delegation(state);
2783} 2803}
2784 2804
2785/** 2805/**
@@ -2849,21 +2869,12 @@ static int nfs41_check_open_stateid(struct nfs4_state *state)
2849 const struct cred *cred = state->owner->so_cred; 2869 const struct cred *cred = state->owner->so_cred;
2850 int status; 2870 int status;
2851 2871
2852 if (test_bit(NFS_OPEN_STATE, &state->flags) == 0) { 2872 if (test_bit(NFS_OPEN_STATE, &state->flags) == 0)
2853 if (test_bit(NFS_DELEGATED_STATE, &state->flags) == 0) {
2854 if (nfs4_have_delegation(state->inode, state->state))
2855 return NFS_OK;
2856 return -NFS4ERR_OPENMODE;
2857 }
2858 return -NFS4ERR_BAD_STATEID; 2873 return -NFS4ERR_BAD_STATEID;
2859 }
2860 status = nfs41_test_and_free_expired_stateid(server, stateid, cred); 2874 status = nfs41_test_and_free_expired_stateid(server, stateid, cred);
2861 trace_nfs4_test_open_stateid(state, NULL, status); 2875 trace_nfs4_test_open_stateid(state, NULL, status);
2862 if (status == -NFS4ERR_EXPIRED || status == -NFS4ERR_BAD_STATEID) { 2876 if (status == -NFS4ERR_EXPIRED || status == -NFS4ERR_BAD_STATEID) {
2863 clear_bit(NFS_O_RDONLY_STATE, &state->flags); 2877 nfs_state_clear_open_state_flags(state);
2864 clear_bit(NFS_O_WRONLY_STATE, &state->flags);
2865 clear_bit(NFS_O_RDWR_STATE, &state->flags);
2866 clear_bit(NFS_OPEN_STATE, &state->flags);
2867 stateid->type = NFS4_INVALID_STATEID_TYPE; 2878 stateid->type = NFS4_INVALID_STATEID_TYPE;
2868 return status; 2879 return status;
2869 } 2880 }
@@ -2876,7 +2887,11 @@ static int nfs41_open_expired(struct nfs4_state_owner *sp, struct nfs4_state *st
2876{ 2887{
2877 int status; 2888 int status;
2878 2889
2879 nfs41_check_delegation_stateid(state); 2890 status = nfs41_check_delegation_stateid(state);
2891 if (status != NFS_OK)
2892 return status;
2893 nfs41_delegation_recover_stateid(state);
2894
2880 status = nfs41_check_expired_locks(state); 2895 status = nfs41_check_expired_locks(state);
2881 if (status != NFS_OK) 2896 if (status != NFS_OK)
2882 return status; 2897 return status;
@@ -3201,7 +3216,7 @@ static int _nfs4_do_setattr(struct inode *inode,
3201 3216
3202 if (nfs4_copy_delegation_stateid(inode, FMODE_WRITE, &arg->stateid, &delegation_cred)) { 3217 if (nfs4_copy_delegation_stateid(inode, FMODE_WRITE, &arg->stateid, &delegation_cred)) {
3203 /* Use that stateid */ 3218 /* Use that stateid */
3204 } else if (ctx != NULL) { 3219 } else if (ctx != NULL && ctx->state) {
3205 struct nfs_lock_context *l_ctx; 3220 struct nfs_lock_context *l_ctx;
3206 if (!nfs4_valid_open_stateid(ctx->state)) 3221 if (!nfs4_valid_open_stateid(ctx->state))
3207 return -EBADF; 3222 return -EBADF;
diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c
index 9afd051a4876..cad4e064b328 100644
--- a/fs/nfs/nfs4state.c
+++ b/fs/nfs/nfs4state.c
@@ -624,24 +624,39 @@ void nfs4_put_state_owner(struct nfs4_state_owner *sp)
624/** 624/**
625 * nfs4_purge_state_owners - Release all cached state owners 625 * nfs4_purge_state_owners - Release all cached state owners
626 * @server: nfs_server with cached state owners to release 626 * @server: nfs_server with cached state owners to release
627 * @head: resulting list of state owners
627 * 628 *
628 * Called at umount time. Remaining state owners will be on 629 * Called at umount time. Remaining state owners will be on
629 * the LRU with ref count of zero. 630 * the LRU with ref count of zero.
631 * Note that the state owners are not freed, but are added
632 * to the list @head, which can later be used as an argument
633 * to nfs4_free_state_owners.
630 */ 634 */
631void nfs4_purge_state_owners(struct nfs_server *server) 635void nfs4_purge_state_owners(struct nfs_server *server, struct list_head *head)
632{ 636{
633 struct nfs_client *clp = server->nfs_client; 637 struct nfs_client *clp = server->nfs_client;
634 struct nfs4_state_owner *sp, *tmp; 638 struct nfs4_state_owner *sp, *tmp;
635 LIST_HEAD(doomed);
636 639
637 spin_lock(&clp->cl_lock); 640 spin_lock(&clp->cl_lock);
638 list_for_each_entry_safe(sp, tmp, &server->state_owners_lru, so_lru) { 641 list_for_each_entry_safe(sp, tmp, &server->state_owners_lru, so_lru) {
639 list_move(&sp->so_lru, &doomed); 642 list_move(&sp->so_lru, head);
640 nfs4_remove_state_owner_locked(sp); 643 nfs4_remove_state_owner_locked(sp);
641 } 644 }
642 spin_unlock(&clp->cl_lock); 645 spin_unlock(&clp->cl_lock);
646}
643 647
644 list_for_each_entry_safe(sp, tmp, &doomed, so_lru) { 648/**
649 * nfs4_purge_state_owners - Release all cached state owners
650 * @head: resulting list of state owners
651 *
652 * Frees a list of state owners that was generated by
653 * nfs4_purge_state_owners
654 */
655void nfs4_free_state_owners(struct list_head *head)
656{
657 struct nfs4_state_owner *sp, *tmp;
658
659 list_for_each_entry_safe(sp, tmp, head, so_lru) {
645 list_del(&sp->so_lru); 660 list_del(&sp->so_lru);
646 nfs4_free_state_owner(sp); 661 nfs4_free_state_owner(sp);
647 } 662 }
@@ -1463,7 +1478,7 @@ void nfs_inode_find_state_and_recover(struct inode *inode,
1463 nfs4_schedule_state_manager(clp); 1478 nfs4_schedule_state_manager(clp);
1464} 1479}
1465 1480
1466static void nfs4_state_mark_open_context_bad(struct nfs4_state *state) 1481static void nfs4_state_mark_open_context_bad(struct nfs4_state *state, int err)
1467{ 1482{
1468 struct inode *inode = state->inode; 1483 struct inode *inode = state->inode;
1469 struct nfs_inode *nfsi = NFS_I(inode); 1484 struct nfs_inode *nfsi = NFS_I(inode);
@@ -1474,6 +1489,8 @@ static void nfs4_state_mark_open_context_bad(struct nfs4_state *state)
1474 if (ctx->state != state) 1489 if (ctx->state != state)
1475 continue; 1490 continue;
1476 set_bit(NFS_CONTEXT_BAD, &ctx->flags); 1491 set_bit(NFS_CONTEXT_BAD, &ctx->flags);
1492 pr_warn("NFSv4: state recovery failed for open file %pd2, "
1493 "error = %d\n", ctx->dentry, err);
1477 } 1494 }
1478 rcu_read_unlock(); 1495 rcu_read_unlock();
1479} 1496}
@@ -1481,7 +1498,7 @@ static void nfs4_state_mark_open_context_bad(struct nfs4_state *state)
1481static void nfs4_state_mark_recovery_failed(struct nfs4_state *state, int error) 1498static void nfs4_state_mark_recovery_failed(struct nfs4_state *state, int error)
1482{ 1499{
1483 set_bit(NFS_STATE_RECOVERY_FAILED, &state->flags); 1500 set_bit(NFS_STATE_RECOVERY_FAILED, &state->flags);
1484 nfs4_state_mark_open_context_bad(state); 1501 nfs4_state_mark_open_context_bad(state, error);
1485} 1502}
1486 1503
1487 1504
@@ -1512,6 +1529,7 @@ restart:
1512 switch (status) { 1529 switch (status) {
1513 case 0: 1530 case 0:
1514 break; 1531 break;
1532 case -ETIMEDOUT:
1515 case -ESTALE: 1533 case -ESTALE:
1516 case -NFS4ERR_ADMIN_REVOKED: 1534 case -NFS4ERR_ADMIN_REVOKED:
1517 case -NFS4ERR_STALE_STATEID: 1535 case -NFS4ERR_STALE_STATEID:
@@ -1605,6 +1623,7 @@ static int __nfs4_reclaim_open_state(struct nfs4_state_owner *sp, struct nfs4_st
1605static int nfs4_reclaim_open_state(struct nfs4_state_owner *sp, const struct nfs4_state_recovery_ops *ops) 1623static int nfs4_reclaim_open_state(struct nfs4_state_owner *sp, const struct nfs4_state_recovery_ops *ops)
1606{ 1624{
1607 struct nfs4_state *state; 1625 struct nfs4_state *state;
1626 unsigned int loop = 0;
1608 int status = 0; 1627 int status = 0;
1609 1628
1610 /* Note: we rely on the sp->so_states list being ordered 1629 /* Note: we rely on the sp->so_states list being ordered
@@ -1631,8 +1650,10 @@ restart:
1631 1650
1632 switch (status) { 1651 switch (status) {
1633 default: 1652 default:
1634 if (status >= 0) 1653 if (status >= 0) {
1654 loop = 0;
1635 break; 1655 break;
1656 }
1636 printk(KERN_ERR "NFS: %s: unhandled error %d\n", __func__, status); 1657 printk(KERN_ERR "NFS: %s: unhandled error %d\n", __func__, status);
1637 /* Fall through */ 1658 /* Fall through */
1638 case -ENOENT: 1659 case -ENOENT:
@@ -1646,6 +1667,10 @@ restart:
1646 break; 1667 break;
1647 case -EAGAIN: 1668 case -EAGAIN:
1648 ssleep(1); 1669 ssleep(1);
1670 if (loop++ < 10) {
1671 set_bit(ops->state_flag_bit, &state->flags);
1672 break;
1673 }
1649 /* Fall through */ 1674 /* Fall through */
1650 case -NFS4ERR_ADMIN_REVOKED: 1675 case -NFS4ERR_ADMIN_REVOKED:
1651 case -NFS4ERR_STALE_STATEID: 1676 case -NFS4ERR_STALE_STATEID:
@@ -1658,11 +1683,13 @@ restart:
1658 case -NFS4ERR_EXPIRED: 1683 case -NFS4ERR_EXPIRED:
1659 case -NFS4ERR_NO_GRACE: 1684 case -NFS4ERR_NO_GRACE:
1660 nfs4_state_mark_reclaim_nograce(sp->so_server->nfs_client, state); 1685 nfs4_state_mark_reclaim_nograce(sp->so_server->nfs_client, state);
1686 /* Fall through */
1661 case -NFS4ERR_STALE_CLIENTID: 1687 case -NFS4ERR_STALE_CLIENTID:
1662 case -NFS4ERR_BADSESSION: 1688 case -NFS4ERR_BADSESSION:
1663 case -NFS4ERR_BADSLOT: 1689 case -NFS4ERR_BADSLOT:
1664 case -NFS4ERR_BAD_HIGH_SLOT: 1690 case -NFS4ERR_BAD_HIGH_SLOT:
1665 case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION: 1691 case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
1692 case -ETIMEDOUT:
1666 goto out_err; 1693 goto out_err;
1667 } 1694 }
1668 nfs4_put_open_state(state); 1695 nfs4_put_open_state(state);
@@ -1856,12 +1883,13 @@ static int nfs4_do_reclaim(struct nfs_client *clp, const struct nfs4_state_recov
1856 struct nfs4_state_owner *sp; 1883 struct nfs4_state_owner *sp;
1857 struct nfs_server *server; 1884 struct nfs_server *server;
1858 struct rb_node *pos; 1885 struct rb_node *pos;
1886 LIST_HEAD(freeme);
1859 int status = 0; 1887 int status = 0;
1860 1888
1861restart: 1889restart:
1862 rcu_read_lock(); 1890 rcu_read_lock();
1863 list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link) { 1891 list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link) {
1864 nfs4_purge_state_owners(server); 1892 nfs4_purge_state_owners(server, &freeme);
1865 spin_lock(&clp->cl_lock); 1893 spin_lock(&clp->cl_lock);
1866 for (pos = rb_first(&server->state_owners); 1894 for (pos = rb_first(&server->state_owners);
1867 pos != NULL; 1895 pos != NULL;
@@ -1890,6 +1918,7 @@ restart:
1890 spin_unlock(&clp->cl_lock); 1918 spin_unlock(&clp->cl_lock);
1891 } 1919 }
1892 rcu_read_unlock(); 1920 rcu_read_unlock();
1921 nfs4_free_state_owners(&freeme);
1893 return 0; 1922 return 0;
1894} 1923}
1895 1924
@@ -1945,7 +1974,6 @@ static int nfs4_handle_reclaim_lease_error(struct nfs_client *clp, int status)
1945 return -EPERM; 1974 return -EPERM;
1946 case -EACCES: 1975 case -EACCES:
1947 case -NFS4ERR_DELAY: 1976 case -NFS4ERR_DELAY:
1948 case -ETIMEDOUT:
1949 case -EAGAIN: 1977 case -EAGAIN:
1950 ssleep(1); 1978 ssleep(1);
1951 break; 1979 break;
@@ -2574,7 +2602,7 @@ static void nfs4_state_manager(struct nfs_client *clp)
2574 } 2602 }
2575 2603
2576 /* Now recover expired state... */ 2604 /* Now recover expired state... */
2577 if (test_and_clear_bit(NFS4CLNT_RECLAIM_NOGRACE, &clp->cl_state)) { 2605 if (test_bit(NFS4CLNT_RECLAIM_NOGRACE, &clp->cl_state)) {
2578 section = "reclaim nograce"; 2606 section = "reclaim nograce";
2579 status = nfs4_do_reclaim(clp, 2607 status = nfs4_do_reclaim(clp,
2580 clp->cl_mvops->nograce_recovery_ops); 2608 clp->cl_mvops->nograce_recovery_ops);
@@ -2582,6 +2610,7 @@ static void nfs4_state_manager(struct nfs_client *clp)
2582 continue; 2610 continue;
2583 if (status < 0) 2611 if (status < 0)
2584 goto out_error; 2612 goto out_error;
2613 clear_bit(NFS4CLNT_RECLAIM_NOGRACE, &clp->cl_state);
2585 } 2614 }
2586 2615
2587 nfs4_end_drain_session(clp); 2616 nfs4_end_drain_session(clp);
diff --git a/fs/nfs/pagelist.c b/fs/nfs/pagelist.c
index ed4e1b07447b..20b3717cd7ca 100644
--- a/fs/nfs/pagelist.c
+++ b/fs/nfs/pagelist.c
@@ -590,7 +590,7 @@ static void nfs_pgio_rpcsetup(struct nfs_pgio_header *hdr,
590 } 590 }
591 591
592 hdr->res.fattr = &hdr->fattr; 592 hdr->res.fattr = &hdr->fattr;
593 hdr->res.count = count; 593 hdr->res.count = 0;
594 hdr->res.eof = 0; 594 hdr->res.eof = 0;
595 hdr->res.verf = &hdr->verf; 595 hdr->res.verf = &hdr->verf;
596 nfs_fattr_init(&hdr->fattr); 596 nfs_fattr_init(&hdr->fattr);
@@ -1251,20 +1251,23 @@ static void nfs_pageio_complete_mirror(struct nfs_pageio_descriptor *desc,
1251int nfs_pageio_resend(struct nfs_pageio_descriptor *desc, 1251int nfs_pageio_resend(struct nfs_pageio_descriptor *desc,
1252 struct nfs_pgio_header *hdr) 1252 struct nfs_pgio_header *hdr)
1253{ 1253{
1254 LIST_HEAD(failed); 1254 LIST_HEAD(pages);
1255 1255
1256 desc->pg_io_completion = hdr->io_completion; 1256 desc->pg_io_completion = hdr->io_completion;
1257 desc->pg_dreq = hdr->dreq; 1257 desc->pg_dreq = hdr->dreq;
1258 while (!list_empty(&hdr->pages)) { 1258 list_splice_init(&hdr->pages, &pages);
1259 struct nfs_page *req = nfs_list_entry(hdr->pages.next); 1259 while (!list_empty(&pages)) {
1260 struct nfs_page *req = nfs_list_entry(pages.next);
1260 1261
1261 if (!nfs_pageio_add_request(desc, req)) 1262 if (!nfs_pageio_add_request(desc, req))
1262 nfs_list_move_request(req, &failed); 1263 break;
1263 } 1264 }
1264 nfs_pageio_complete(desc); 1265 nfs_pageio_complete(desc);
1265 if (!list_empty(&failed)) { 1266 if (!list_empty(&pages)) {
1266 list_move(&failed, &hdr->pages); 1267 int err = desc->pg_error < 0 ? desc->pg_error : -EIO;
1267 return desc->pg_error < 0 ? desc->pg_error : -EIO; 1268 hdr->completion_ops->error_cleanup(&pages, err);
1269 nfs_set_pgio_error(hdr, err, hdr->io_start);
1270 return err;
1268 } 1271 }
1269 return 0; 1272 return 0;
1270} 1273}
diff --git a/fs/nfs/pnfs.c b/fs/nfs/pnfs.c
index 75bd5b552ba4..4525d5acae38 100644
--- a/fs/nfs/pnfs.c
+++ b/fs/nfs/pnfs.c
@@ -1903,12 +1903,6 @@ lookup_again:
1903 goto out_unlock; 1903 goto out_unlock;
1904 } 1904 }
1905 1905
1906 if (!nfs4_valid_open_stateid(ctx->state)) {
1907 trace_pnfs_update_layout(ino, pos, count, iomode, lo, lseg,
1908 PNFS_UPDATE_LAYOUT_INVALID_OPEN);
1909 goto out_unlock;
1910 }
1911
1912 /* 1906 /*
1913 * Choose a stateid for the LAYOUTGET. If we don't have a layout 1907 * Choose a stateid for the LAYOUTGET. If we don't have a layout
1914 * stateid, or it has been invalidated, then we must use the open 1908 * stateid, or it has been invalidated, then we must use the open
@@ -1939,6 +1933,7 @@ lookup_again:
1939 iomode == IOMODE_RW ? FMODE_WRITE : FMODE_READ, 1933 iomode == IOMODE_RW ? FMODE_WRITE : FMODE_READ,
1940 NULL, &stateid, NULL); 1934 NULL, &stateid, NULL);
1941 if (status != 0) { 1935 if (status != 0) {
1936 lseg = ERR_PTR(status);
1942 trace_pnfs_update_layout(ino, pos, count, 1937 trace_pnfs_update_layout(ino, pos, count,
1943 iomode, lo, lseg, 1938 iomode, lo, lseg,
1944 PNFS_UPDATE_LAYOUT_INVALID_OPEN); 1939 PNFS_UPDATE_LAYOUT_INVALID_OPEN);
diff --git a/fs/nfs/pnfs_nfs.c b/fs/nfs/pnfs_nfs.c
index c0046c348910..82af4809b869 100644
--- a/fs/nfs/pnfs_nfs.c
+++ b/fs/nfs/pnfs_nfs.c
@@ -627,11 +627,16 @@ static int _nfs4_pnfs_v3_ds_connect(struct nfs_server *mds_srv,
627 /* Add this address as an alias */ 627 /* Add this address as an alias */
628 rpc_clnt_add_xprt(clp->cl_rpcclient, &xprt_args, 628 rpc_clnt_add_xprt(clp->cl_rpcclient, &xprt_args,
629 rpc_clnt_test_and_add_xprt, NULL); 629 rpc_clnt_test_and_add_xprt, NULL);
630 } else 630 continue;
631 clp = get_v3_ds_connect(mds_srv, 631 }
632 (struct sockaddr *)&da->da_addr, 632 clp = get_v3_ds_connect(mds_srv,
633 da->da_addrlen, IPPROTO_TCP, 633 (struct sockaddr *)&da->da_addr,
634 timeo, retrans); 634 da->da_addrlen, IPPROTO_TCP,
635 timeo, retrans);
636 if (IS_ERR(clp))
637 continue;
638 clp->cl_rpcclient->cl_softerr = 0;
639 clp->cl_rpcclient->cl_softrtry = 0;
635 } 640 }
636 641
637 if (IS_ERR(clp)) { 642 if (IS_ERR(clp)) {
diff --git a/fs/nfs/proc.c b/fs/nfs/proc.c
index 5552fa8b6e12..0f7288b94633 100644
--- a/fs/nfs/proc.c
+++ b/fs/nfs/proc.c
@@ -594,7 +594,8 @@ static int nfs_read_done(struct rpc_task *task, struct nfs_pgio_header *hdr)
594 /* Emulate the eof flag, which isn't normally needed in NFSv2 594 /* Emulate the eof flag, which isn't normally needed in NFSv2
595 * as it is guaranteed to always return the file attributes 595 * as it is guaranteed to always return the file attributes
596 */ 596 */
597 if (hdr->args.offset + hdr->res.count >= hdr->res.fattr->size) 597 if ((hdr->res.count == 0 && hdr->args.count > 0) ||
598 hdr->args.offset + hdr->res.count >= hdr->res.fattr->size)
598 hdr->res.eof = 1; 599 hdr->res.eof = 1;
599 } 600 }
600 return 0; 601 return 0;
@@ -615,8 +616,10 @@ static int nfs_proc_pgio_rpc_prepare(struct rpc_task *task,
615 616
616static int nfs_write_done(struct rpc_task *task, struct nfs_pgio_header *hdr) 617static int nfs_write_done(struct rpc_task *task, struct nfs_pgio_header *hdr)
617{ 618{
618 if (task->tk_status >= 0) 619 if (task->tk_status >= 0) {
620 hdr->res.count = hdr->args.count;
619 nfs_writeback_update_inode(hdr); 621 nfs_writeback_update_inode(hdr);
622 }
620 return 0; 623 return 0;
621} 624}
622 625
diff --git a/fs/nfs/read.c b/fs/nfs/read.c
index c19841c82b6a..cfe0b586eadd 100644
--- a/fs/nfs/read.c
+++ b/fs/nfs/read.c
@@ -91,19 +91,25 @@ void nfs_pageio_reset_read_mds(struct nfs_pageio_descriptor *pgio)
91} 91}
92EXPORT_SYMBOL_GPL(nfs_pageio_reset_read_mds); 92EXPORT_SYMBOL_GPL(nfs_pageio_reset_read_mds);
93 93
94static void nfs_readpage_release(struct nfs_page *req) 94static void nfs_readpage_release(struct nfs_page *req, int error)
95{ 95{
96 struct inode *inode = d_inode(nfs_req_openctx(req)->dentry); 96 struct inode *inode = d_inode(nfs_req_openctx(req)->dentry);
97 struct page *page = req->wb_page;
97 98
98 dprintk("NFS: read done (%s/%llu %d@%lld)\n", inode->i_sb->s_id, 99 dprintk("NFS: read done (%s/%llu %d@%lld)\n", inode->i_sb->s_id,
99 (unsigned long long)NFS_FILEID(inode), req->wb_bytes, 100 (unsigned long long)NFS_FILEID(inode), req->wb_bytes,
100 (long long)req_offset(req)); 101 (long long)req_offset(req));
101 102
103 if (nfs_error_is_fatal_on_server(error) && error != -ETIMEDOUT)
104 SetPageError(page);
102 if (nfs_page_group_sync_on_bit(req, PG_UNLOCKPAGE)) { 105 if (nfs_page_group_sync_on_bit(req, PG_UNLOCKPAGE)) {
103 if (PageUptodate(req->wb_page)) 106 struct address_space *mapping = page_file_mapping(page);
104 nfs_readpage_to_fscache(inode, req->wb_page, 0);
105 107
106 unlock_page(req->wb_page); 108 if (PageUptodate(page))
109 nfs_readpage_to_fscache(inode, page, 0);
110 else if (!PageError(page) && !PagePrivate(page))
111 generic_error_remove_page(mapping, page);
112 unlock_page(page);
107 } 113 }
108 nfs_release_request(req); 114 nfs_release_request(req);
109} 115}
@@ -131,7 +137,7 @@ int nfs_readpage_async(struct nfs_open_context *ctx, struct inode *inode,
131 &nfs_async_read_completion_ops); 137 &nfs_async_read_completion_ops);
132 if (!nfs_pageio_add_request(&pgio, new)) { 138 if (!nfs_pageio_add_request(&pgio, new)) {
133 nfs_list_remove_request(new); 139 nfs_list_remove_request(new);
134 nfs_readpage_release(new); 140 nfs_readpage_release(new, pgio.pg_error);
135 } 141 }
136 nfs_pageio_complete(&pgio); 142 nfs_pageio_complete(&pgio);
137 143
@@ -153,6 +159,7 @@ static void nfs_page_group_set_uptodate(struct nfs_page *req)
153static void nfs_read_completion(struct nfs_pgio_header *hdr) 159static void nfs_read_completion(struct nfs_pgio_header *hdr)
154{ 160{
155 unsigned long bytes = 0; 161 unsigned long bytes = 0;
162 int error;
156 163
157 if (test_bit(NFS_IOHDR_REDO, &hdr->flags)) 164 if (test_bit(NFS_IOHDR_REDO, &hdr->flags))
158 goto out; 165 goto out;
@@ -179,14 +186,19 @@ static void nfs_read_completion(struct nfs_pgio_header *hdr)
179 zero_user_segment(page, start, end); 186 zero_user_segment(page, start, end);
180 } 187 }
181 } 188 }
189 error = 0;
182 bytes += req->wb_bytes; 190 bytes += req->wb_bytes;
183 if (test_bit(NFS_IOHDR_ERROR, &hdr->flags)) { 191 if (test_bit(NFS_IOHDR_ERROR, &hdr->flags)) {
184 if (bytes <= hdr->good_bytes) 192 if (bytes <= hdr->good_bytes)
185 nfs_page_group_set_uptodate(req); 193 nfs_page_group_set_uptodate(req);
194 else {
195 error = hdr->error;
196 xchg(&nfs_req_openctx(req)->error, error);
197 }
186 } else 198 } else
187 nfs_page_group_set_uptodate(req); 199 nfs_page_group_set_uptodate(req);
188 nfs_list_remove_request(req); 200 nfs_list_remove_request(req);
189 nfs_readpage_release(req); 201 nfs_readpage_release(req, error);
190 } 202 }
191out: 203out:
192 hdr->release(hdr); 204 hdr->release(hdr);
@@ -213,7 +225,7 @@ nfs_async_read_error(struct list_head *head, int error)
213 while (!list_empty(head)) { 225 while (!list_empty(head)) {
214 req = nfs_list_entry(head->next); 226 req = nfs_list_entry(head->next);
215 nfs_list_remove_request(req); 227 nfs_list_remove_request(req);
216 nfs_readpage_release(req); 228 nfs_readpage_release(req, error);
217 } 229 }
218} 230}
219 231
@@ -337,8 +349,13 @@ int nfs_readpage(struct file *file, struct page *page)
337 goto out; 349 goto out;
338 } 350 }
339 351
352 xchg(&ctx->error, 0);
340 error = nfs_readpage_async(ctx, inode, page); 353 error = nfs_readpage_async(ctx, inode, page);
341 354 if (!error) {
355 error = wait_on_page_locked_killable(page);
356 if (!PageUptodate(page) && !error)
357 error = xchg(&ctx->error, 0);
358 }
342out: 359out:
343 put_nfs_open_context(ctx); 360 put_nfs_open_context(ctx);
344 return error; 361 return error;
@@ -372,8 +389,8 @@ readpage_async_filler(void *data, struct page *page)
372 zero_user_segment(page, len, PAGE_SIZE); 389 zero_user_segment(page, len, PAGE_SIZE);
373 if (!nfs_pageio_add_request(desc->pgio, new)) { 390 if (!nfs_pageio_add_request(desc->pgio, new)) {
374 nfs_list_remove_request(new); 391 nfs_list_remove_request(new);
375 nfs_readpage_release(new);
376 error = desc->pgio->pg_error; 392 error = desc->pgio->pg_error;
393 nfs_readpage_release(new, error);
377 goto out; 394 goto out;
378 } 395 }
379 return 0; 396 return 0;
diff --git a/fs/nfs/super.c b/fs/nfs/super.c
index 628631e2e34f..703f595dce90 100644
--- a/fs/nfs/super.c
+++ b/fs/nfs/super.c
@@ -2260,6 +2260,7 @@ nfs_compare_remount_data(struct nfs_server *nfss,
2260 data->acdirmin != nfss->acdirmin / HZ || 2260 data->acdirmin != nfss->acdirmin / HZ ||
2261 data->acdirmax != nfss->acdirmax / HZ || 2261 data->acdirmax != nfss->acdirmax / HZ ||
2262 data->timeo != (10U * nfss->client->cl_timeout->to_initval / HZ) || 2262 data->timeo != (10U * nfss->client->cl_timeout->to_initval / HZ) ||
2263 (data->options & NFS_OPTION_FSCACHE) != (nfss->options & NFS_OPTION_FSCACHE) ||
2263 data->nfs_server.port != nfss->port || 2264 data->nfs_server.port != nfss->port ||
2264 data->nfs_server.addrlen != nfss->nfs_client->cl_addrlen || 2265 data->nfs_server.addrlen != nfss->nfs_client->cl_addrlen ||
2265 !rpc_cmp_addr((struct sockaddr *)&data->nfs_server.address, 2266 !rpc_cmp_addr((struct sockaddr *)&data->nfs_server.address,
diff --git a/fs/nfs/write.c b/fs/nfs/write.c
index 92d9cadc6102..85ca49549b39 100644
--- a/fs/nfs/write.c
+++ b/fs/nfs/write.c
@@ -57,6 +57,7 @@ static const struct rpc_call_ops nfs_commit_ops;
57static const struct nfs_pgio_completion_ops nfs_async_write_completion_ops; 57static const struct nfs_pgio_completion_ops nfs_async_write_completion_ops;
58static const struct nfs_commit_completion_ops nfs_commit_completion_ops; 58static const struct nfs_commit_completion_ops nfs_commit_completion_ops;
59static const struct nfs_rw_ops nfs_rw_write_ops; 59static const struct nfs_rw_ops nfs_rw_write_ops;
60static void nfs_inode_remove_request(struct nfs_page *req);
60static void nfs_clear_request_commit(struct nfs_page *req); 61static void nfs_clear_request_commit(struct nfs_page *req);
61static void nfs_init_cinfo_from_inode(struct nfs_commit_info *cinfo, 62static void nfs_init_cinfo_from_inode(struct nfs_commit_info *cinfo,
62 struct inode *inode); 63 struct inode *inode);
@@ -591,23 +592,13 @@ release_request:
591 592
592static void nfs_write_error(struct nfs_page *req, int error) 593static void nfs_write_error(struct nfs_page *req, int error)
593{ 594{
595 nfs_set_pageerror(page_file_mapping(req->wb_page));
594 nfs_mapping_set_error(req->wb_page, error); 596 nfs_mapping_set_error(req->wb_page, error);
597 nfs_inode_remove_request(req);
595 nfs_end_page_writeback(req); 598 nfs_end_page_writeback(req);
596 nfs_release_request(req); 599 nfs_release_request(req);
597} 600}
598 601
599static bool
600nfs_error_is_fatal_on_server(int err)
601{
602 switch (err) {
603 case 0:
604 case -ERESTARTSYS:
605 case -EINTR:
606 return false;
607 }
608 return nfs_error_is_fatal(err);
609}
610
611/* 602/*
612 * Find an associated nfs write request, and prepare to flush it out 603 * Find an associated nfs write request, and prepare to flush it out
613 * May return an error if the user signalled nfs_wait_on_request(). 604 * May return an error if the user signalled nfs_wait_on_request().
@@ -615,7 +606,6 @@ nfs_error_is_fatal_on_server(int err)
615static int nfs_page_async_flush(struct nfs_pageio_descriptor *pgio, 606static int nfs_page_async_flush(struct nfs_pageio_descriptor *pgio,
616 struct page *page) 607 struct page *page)
617{ 608{
618 struct address_space *mapping;
619 struct nfs_page *req; 609 struct nfs_page *req;
620 int ret = 0; 610 int ret = 0;
621 611
@@ -630,12 +620,11 @@ static int nfs_page_async_flush(struct nfs_pageio_descriptor *pgio,
630 WARN_ON_ONCE(test_bit(PG_CLEAN, &req->wb_flags)); 620 WARN_ON_ONCE(test_bit(PG_CLEAN, &req->wb_flags));
631 621
632 /* If there is a fatal error that covers this write, just exit */ 622 /* If there is a fatal error that covers this write, just exit */
633 ret = 0; 623 ret = pgio->pg_error;
634 mapping = page_file_mapping(page); 624 if (nfs_error_is_fatal_on_server(ret))
635 if (test_bit(AS_ENOSPC, &mapping->flags) ||
636 test_bit(AS_EIO, &mapping->flags))
637 goto out_launder; 625 goto out_launder;
638 626
627 ret = 0;
639 if (!nfs_pageio_add_request(pgio, req)) { 628 if (!nfs_pageio_add_request(pgio, req)) {
640 ret = pgio->pg_error; 629 ret = pgio->pg_error;
641 /* 630 /*
@@ -647,6 +636,7 @@ static int nfs_page_async_flush(struct nfs_pageio_descriptor *pgio,
647 } else 636 } else
648 ret = -EAGAIN; 637 ret = -EAGAIN;
649 nfs_redirty_request(req); 638 nfs_redirty_request(req);
639 pgio->pg_error = 0;
650 } else 640 } else
651 nfs_add_stats(page_file_mapping(page)->host, 641 nfs_add_stats(page_file_mapping(page)->host,
652 NFSIOS_WRITEPAGES, 1); 642 NFSIOS_WRITEPAGES, 1);
@@ -666,7 +656,7 @@ static int nfs_do_writepage(struct page *page, struct writeback_control *wbc,
666 ret = nfs_page_async_flush(pgio, page); 656 ret = nfs_page_async_flush(pgio, page);
667 if (ret == -EAGAIN) { 657 if (ret == -EAGAIN) {
668 redirty_page_for_writepage(wbc, page); 658 redirty_page_for_writepage(wbc, page);
669 ret = 0; 659 ret = AOP_WRITEPAGE_ACTIVATE;
670 } 660 }
671 return ret; 661 return ret;
672} 662}
@@ -685,10 +675,11 @@ static int nfs_writepage_locked(struct page *page,
685 nfs_pageio_init_write(&pgio, inode, 0, 675 nfs_pageio_init_write(&pgio, inode, 0,
686 false, &nfs_async_write_completion_ops); 676 false, &nfs_async_write_completion_ops);
687 err = nfs_do_writepage(page, wbc, &pgio); 677 err = nfs_do_writepage(page, wbc, &pgio);
678 pgio.pg_error = 0;
688 nfs_pageio_complete(&pgio); 679 nfs_pageio_complete(&pgio);
689 if (err < 0) 680 if (err < 0)
690 return err; 681 return err;
691 if (pgio.pg_error < 0) 682 if (nfs_error_is_fatal(pgio.pg_error))
692 return pgio.pg_error; 683 return pgio.pg_error;
693 return 0; 684 return 0;
694} 685}
@@ -698,7 +689,8 @@ int nfs_writepage(struct page *page, struct writeback_control *wbc)
698 int ret; 689 int ret;
699 690
700 ret = nfs_writepage_locked(page, wbc); 691 ret = nfs_writepage_locked(page, wbc);
701 unlock_page(page); 692 if (ret != AOP_WRITEPAGE_ACTIVATE)
693 unlock_page(page);
702 return ret; 694 return ret;
703} 695}
704 696
@@ -707,7 +699,8 @@ static int nfs_writepages_callback(struct page *page, struct writeback_control *
707 int ret; 699 int ret;
708 700
709 ret = nfs_do_writepage(page, wbc, data); 701 ret = nfs_do_writepage(page, wbc, data);
710 unlock_page(page); 702 if (ret != AOP_WRITEPAGE_ACTIVATE)
703 unlock_page(page);
711 return ret; 704 return ret;
712} 705}
713 706
@@ -733,13 +726,14 @@ int nfs_writepages(struct address_space *mapping, struct writeback_control *wbc)
733 &nfs_async_write_completion_ops); 726 &nfs_async_write_completion_ops);
734 pgio.pg_io_completion = ioc; 727 pgio.pg_io_completion = ioc;
735 err = write_cache_pages(mapping, wbc, nfs_writepages_callback, &pgio); 728 err = write_cache_pages(mapping, wbc, nfs_writepages_callback, &pgio);
729 pgio.pg_error = 0;
736 nfs_pageio_complete(&pgio); 730 nfs_pageio_complete(&pgio);
737 nfs_io_completion_put(ioc); 731 nfs_io_completion_put(ioc);
738 732
739 if (err < 0) 733 if (err < 0)
740 goto out_err; 734 goto out_err;
741 err = pgio.pg_error; 735 err = pgio.pg_error;
742 if (err < 0) 736 if (nfs_error_is_fatal(err))
743 goto out_err; 737 goto out_err;
744 return 0; 738 return 0;
745out_err: 739out_err:
diff --git a/fs/nfsd/nfscache.c b/fs/nfsd/nfscache.c
index 26ad75ae2be0..96352ab7bd81 100644
--- a/fs/nfsd/nfscache.c
+++ b/fs/nfsd/nfscache.c
@@ -571,7 +571,7 @@ nfsd_cache_append(struct svc_rqst *rqstp, struct kvec *data)
571 */ 571 */
572static int nfsd_reply_cache_stats_show(struct seq_file *m, void *v) 572static int nfsd_reply_cache_stats_show(struct seq_file *m, void *v)
573{ 573{
574 struct nfsd_net *nn = v; 574 struct nfsd_net *nn = m->private;
575 575
576 seq_printf(m, "max entries: %u\n", nn->max_drc_entries); 576 seq_printf(m, "max entries: %u\n", nn->max_drc_entries);
577 seq_printf(m, "num entries: %u\n", 577 seq_printf(m, "num entries: %u\n",
diff --git a/fs/nfsd/nfsctl.c b/fs/nfsd/nfsctl.c
index 13c548733860..3cf4f6aa48d6 100644
--- a/fs/nfsd/nfsctl.c
+++ b/fs/nfsd/nfsctl.c
@@ -1171,13 +1171,17 @@ static struct inode *nfsd_get_inode(struct super_block *sb, umode_t mode)
1171 return inode; 1171 return inode;
1172} 1172}
1173 1173
1174static int __nfsd_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode) 1174static int __nfsd_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode, struct nfsdfs_client *ncl)
1175{ 1175{
1176 struct inode *inode; 1176 struct inode *inode;
1177 1177
1178 inode = nfsd_get_inode(dir->i_sb, mode); 1178 inode = nfsd_get_inode(dir->i_sb, mode);
1179 if (!inode) 1179 if (!inode)
1180 return -ENOMEM; 1180 return -ENOMEM;
1181 if (ncl) {
1182 inode->i_private = ncl;
1183 kref_get(&ncl->cl_ref);
1184 }
1181 d_add(dentry, inode); 1185 d_add(dentry, inode);
1182 inc_nlink(dir); 1186 inc_nlink(dir);
1183 fsnotify_mkdir(dir, dentry); 1187 fsnotify_mkdir(dir, dentry);
@@ -1194,17 +1198,14 @@ static struct dentry *nfsd_mkdir(struct dentry *parent, struct nfsdfs_client *nc
1194 dentry = d_alloc_name(parent, name); 1198 dentry = d_alloc_name(parent, name);
1195 if (!dentry) 1199 if (!dentry)
1196 goto out_err; 1200 goto out_err;
1197 ret = __nfsd_mkdir(d_inode(parent), dentry, S_IFDIR | 0600); 1201 ret = __nfsd_mkdir(d_inode(parent), dentry, S_IFDIR | 0600, ncl);
1198 if (ret) 1202 if (ret)
1199 goto out_err; 1203 goto out_err;
1200 if (ncl) {
1201 d_inode(dentry)->i_private = ncl;
1202 kref_get(&ncl->cl_ref);
1203 }
1204out: 1204out:
1205 inode_unlock(dir); 1205 inode_unlock(dir);
1206 return dentry; 1206 return dentry;
1207out_err: 1207out_err:
1208 dput(dentry);
1208 dentry = ERR_PTR(ret); 1209 dentry = ERR_PTR(ret);
1209 goto out; 1210 goto out;
1210} 1211}
@@ -1214,11 +1215,9 @@ static void clear_ncl(struct inode *inode)
1214 struct nfsdfs_client *ncl = inode->i_private; 1215 struct nfsdfs_client *ncl = inode->i_private;
1215 1216
1216 inode->i_private = NULL; 1217 inode->i_private = NULL;
1217 synchronize_rcu();
1218 kref_put(&ncl->cl_ref, ncl->cl_release); 1218 kref_put(&ncl->cl_ref, ncl->cl_release);
1219} 1219}
1220 1220
1221
1222static struct nfsdfs_client *__get_nfsdfs_client(struct inode *inode) 1221static struct nfsdfs_client *__get_nfsdfs_client(struct inode *inode)
1223{ 1222{
1224 struct nfsdfs_client *nc = inode->i_private; 1223 struct nfsdfs_client *nc = inode->i_private;
@@ -1232,9 +1231,9 @@ struct nfsdfs_client *get_nfsdfs_client(struct inode *inode)
1232{ 1231{
1233 struct nfsdfs_client *nc; 1232 struct nfsdfs_client *nc;
1234 1233
1235 rcu_read_lock(); 1234 inode_lock_shared(inode);
1236 nc = __get_nfsdfs_client(inode); 1235 nc = __get_nfsdfs_client(inode);
1237 rcu_read_unlock(); 1236 inode_unlock_shared(inode);
1238 return nc; 1237 return nc;
1239} 1238}
1240/* from __rpc_unlink */ 1239/* from __rpc_unlink */
diff --git a/fs/ocfs2/xattr.c b/fs/ocfs2/xattr.c
index 385f3aaa2448..90c830e3758e 100644
--- a/fs/ocfs2/xattr.c
+++ b/fs/ocfs2/xattr.c
@@ -3825,7 +3825,6 @@ static int ocfs2_xattr_bucket_find(struct inode *inode,
3825 u16 blk_per_bucket = ocfs2_blocks_per_xattr_bucket(inode->i_sb); 3825 u16 blk_per_bucket = ocfs2_blocks_per_xattr_bucket(inode->i_sb);
3826 int low_bucket = 0, bucket, high_bucket; 3826 int low_bucket = 0, bucket, high_bucket;
3827 struct ocfs2_xattr_bucket *search; 3827 struct ocfs2_xattr_bucket *search;
3828 u32 last_hash;
3829 u64 blkno, lower_blkno = 0; 3828 u64 blkno, lower_blkno = 0;
3830 3829
3831 search = ocfs2_xattr_bucket_new(inode); 3830 search = ocfs2_xattr_bucket_new(inode);
@@ -3869,8 +3868,6 @@ static int ocfs2_xattr_bucket_find(struct inode *inode,
3869 if (xh->xh_count) 3868 if (xh->xh_count)
3870 xe = &xh->xh_entries[le16_to_cpu(xh->xh_count) - 1]; 3869 xe = &xh->xh_entries[le16_to_cpu(xh->xh_count) - 1];
3871 3870
3872 last_hash = le32_to_cpu(xe->xe_name_hash);
3873
3874 /* record lower_blkno which may be the insert place. */ 3871 /* record lower_blkno which may be the insert place. */
3875 lower_blkno = blkno; 3872 lower_blkno = blkno;
3876 3873
diff --git a/fs/open.c b/fs/open.c
index b5b80469b93d..a59abe3c669a 100644
--- a/fs/open.c
+++ b/fs/open.c
@@ -374,6 +374,25 @@ long do_faccessat(int dfd, const char __user *filename, int mode)
374 override_cred->cap_permitted; 374 override_cred->cap_permitted;
375 } 375 }
376 376
377 /*
378 * The new set of credentials can *only* be used in
379 * task-synchronous circumstances, and does not need
380 * RCU freeing, unless somebody then takes a separate
381 * reference to it.
382 *
383 * NOTE! This is _only_ true because this credential
384 * is used purely for override_creds() that installs
385 * it as the subjective cred. Other threads will be
386 * accessing ->real_cred, not the subjective cred.
387 *
388 * If somebody _does_ make a copy of this (using the
389 * 'get_current_cred()' function), that will clear the
390 * non_rcu field, because now that other user may be
391 * expecting RCU freeing. But normal thread-synchronous
392 * cred accesses will keep things non-RCY.
393 */
394 override_cred->non_rcu = 1;
395
377 old_cred = override_creds(override_cred); 396 old_cred = override_creds(override_cred);
378retry: 397retry:
379 res = user_path_at(dfd, filename, lookup_flags, &path); 398 res = user_path_at(dfd, filename, lookup_flags, &path);
diff --git a/fs/read_write.c b/fs/read_write.c
index 1f5088dec566..5bbf587f5bc1 100644
--- a/fs/read_write.c
+++ b/fs/read_write.c
@@ -1811,10 +1811,7 @@ static int generic_remap_check_len(struct inode *inode_in,
1811 return (remap_flags & REMAP_FILE_DEDUP) ? -EBADE : -EINVAL; 1811 return (remap_flags & REMAP_FILE_DEDUP) ? -EBADE : -EINVAL;
1812} 1812}
1813 1813
1814/* 1814/* Read a page's worth of file data into the page cache. */
1815 * Read a page's worth of file data into the page cache. Return the page
1816 * locked.
1817 */
1818static struct page *vfs_dedupe_get_page(struct inode *inode, loff_t offset) 1815static struct page *vfs_dedupe_get_page(struct inode *inode, loff_t offset)
1819{ 1816{
1820 struct page *page; 1817 struct page *page;
@@ -1826,11 +1823,33 @@ static struct page *vfs_dedupe_get_page(struct inode *inode, loff_t offset)
1826 put_page(page); 1823 put_page(page);
1827 return ERR_PTR(-EIO); 1824 return ERR_PTR(-EIO);
1828 } 1825 }
1829 lock_page(page);
1830 return page; 1826 return page;
1831} 1827}
1832 1828
1833/* 1829/*
1830 * Lock two pages, ensuring that we lock in offset order if the pages are from
1831 * the same file.
1832 */
1833static void vfs_lock_two_pages(struct page *page1, struct page *page2)
1834{
1835 /* Always lock in order of increasing index. */
1836 if (page1->index > page2->index)
1837 swap(page1, page2);
1838
1839 lock_page(page1);
1840 if (page1 != page2)
1841 lock_page(page2);
1842}
1843
1844/* Unlock two pages, being careful not to unlock the same page twice. */
1845static void vfs_unlock_two_pages(struct page *page1, struct page *page2)
1846{
1847 unlock_page(page1);
1848 if (page1 != page2)
1849 unlock_page(page2);
1850}
1851
1852/*
1834 * Compare extents of two files to see if they are the same. 1853 * Compare extents of two files to see if they are the same.
1835 * Caller must have locked both inodes to prevent write races. 1854 * Caller must have locked both inodes to prevent write races.
1836 */ 1855 */
@@ -1867,10 +1886,24 @@ static int vfs_dedupe_file_range_compare(struct inode *src, loff_t srcoff,
1867 dest_page = vfs_dedupe_get_page(dest, destoff); 1886 dest_page = vfs_dedupe_get_page(dest, destoff);
1868 if (IS_ERR(dest_page)) { 1887 if (IS_ERR(dest_page)) {
1869 error = PTR_ERR(dest_page); 1888 error = PTR_ERR(dest_page);
1870 unlock_page(src_page);
1871 put_page(src_page); 1889 put_page(src_page);
1872 goto out_error; 1890 goto out_error;
1873 } 1891 }
1892
1893 vfs_lock_two_pages(src_page, dest_page);
1894
1895 /*
1896 * Now that we've locked both pages, make sure they're still
1897 * mapped to the file data we're interested in. If not,
1898 * someone is invalidating pages on us and we lose.
1899 */
1900 if (!PageUptodate(src_page) || !PageUptodate(dest_page) ||
1901 src_page->mapping != src->i_mapping ||
1902 dest_page->mapping != dest->i_mapping) {
1903 same = false;
1904 goto unlock;
1905 }
1906
1874 src_addr = kmap_atomic(src_page); 1907 src_addr = kmap_atomic(src_page);
1875 dest_addr = kmap_atomic(dest_page); 1908 dest_addr = kmap_atomic(dest_page);
1876 1909
@@ -1882,8 +1915,8 @@ static int vfs_dedupe_file_range_compare(struct inode *src, loff_t srcoff,
1882 1915
1883 kunmap_atomic(dest_addr); 1916 kunmap_atomic(dest_addr);
1884 kunmap_atomic(src_addr); 1917 kunmap_atomic(src_addr);
1885 unlock_page(dest_page); 1918unlock:
1886 unlock_page(src_page); 1919 vfs_unlock_two_pages(src_page, dest_page);
1887 put_page(dest_page); 1920 put_page(dest_page);
1888 put_page(src_page); 1921 put_page(src_page);
1889 1922
diff --git a/fs/seq_file.c b/fs/seq_file.c
index 04f09689cd6d..1600034a929b 100644
--- a/fs/seq_file.c
+++ b/fs/seq_file.c
@@ -119,6 +119,7 @@ static int traverse(struct seq_file *m, loff_t offset)
119 } 119 }
120 if (seq_has_overflowed(m)) 120 if (seq_has_overflowed(m))
121 goto Eoverflow; 121 goto Eoverflow;
122 p = m->op->next(m, p, &m->index);
122 if (pos + m->count > offset) { 123 if (pos + m->count > offset) {
123 m->from = offset - pos; 124 m->from = offset - pos;
124 m->count -= m->from; 125 m->count -= m->from;
@@ -126,7 +127,6 @@ static int traverse(struct seq_file *m, loff_t offset)
126 } 127 }
127 pos += m->count; 128 pos += m->count;
128 m->count = 0; 129 m->count = 0;
129 p = m->op->next(m, p, &m->index);
130 if (pos == offset) 130 if (pos == offset)
131 break; 131 break;
132 } 132 }
diff --git a/fs/super.c b/fs/super.c
index 113c58f19425..5960578a4076 100644
--- a/fs/super.c
+++ b/fs/super.c
@@ -478,13 +478,10 @@ EXPORT_SYMBOL(generic_shutdown_super);
478 478
479bool mount_capable(struct fs_context *fc) 479bool mount_capable(struct fs_context *fc)
480{ 480{
481 struct user_namespace *user_ns = fc->global ? &init_user_ns
482 : fc->user_ns;
483
484 if (!(fc->fs_type->fs_flags & FS_USERNS_MOUNT)) 481 if (!(fc->fs_type->fs_flags & FS_USERNS_MOUNT))
485 return capable(CAP_SYS_ADMIN); 482 return capable(CAP_SYS_ADMIN);
486 else 483 else
487 return ns_capable(user_ns, CAP_SYS_ADMIN); 484 return ns_capable(fc->user_ns, CAP_SYS_ADMIN);
488} 485}
489 486
490/** 487/**
diff --git a/fs/ubifs/budget.c b/fs/ubifs/budget.c
index 80d7301ab76d..c0b84e960b20 100644
--- a/fs/ubifs/budget.c
+++ b/fs/ubifs/budget.c
@@ -51,7 +51,7 @@
51static void shrink_liability(struct ubifs_info *c, int nr_to_write) 51static void shrink_liability(struct ubifs_info *c, int nr_to_write)
52{ 52{
53 down_read(&c->vfs_sb->s_umount); 53 down_read(&c->vfs_sb->s_umount);
54 writeback_inodes_sb(c->vfs_sb, WB_REASON_FS_FREE_SPACE); 54 writeback_inodes_sb_nr(c->vfs_sb, nr_to_write, WB_REASON_FS_FREE_SPACE);
55 up_read(&c->vfs_sb->s_umount); 55 up_read(&c->vfs_sb->s_umount);
56} 56}
57 57
diff --git a/fs/ubifs/orphan.c b/fs/ubifs/orphan.c
index b52624e28fa1..3b4b4114f208 100644
--- a/fs/ubifs/orphan.c
+++ b/fs/ubifs/orphan.c
@@ -129,7 +129,6 @@ static void __orphan_drop(struct ubifs_info *c, struct ubifs_orphan *o)
129static void orphan_delete(struct ubifs_info *c, struct ubifs_orphan *orph) 129static void orphan_delete(struct ubifs_info *c, struct ubifs_orphan *orph)
130{ 130{
131 if (orph->del) { 131 if (orph->del) {
132 spin_unlock(&c->orphan_lock);
133 dbg_gen("deleted twice ino %lu", orph->inum); 132 dbg_gen("deleted twice ino %lu", orph->inum);
134 return; 133 return;
135 } 134 }
@@ -138,7 +137,6 @@ static void orphan_delete(struct ubifs_info *c, struct ubifs_orphan *orph)
138 orph->del = 1; 137 orph->del = 1;
139 orph->dnext = c->orph_dnext; 138 orph->dnext = c->orph_dnext;
140 c->orph_dnext = orph; 139 c->orph_dnext = orph;
141 spin_unlock(&c->orphan_lock);
142 dbg_gen("delete later ino %lu", orph->inum); 140 dbg_gen("delete later ino %lu", orph->inum);
143 return; 141 return;
144 } 142 }
diff --git a/fs/ubifs/super.c b/fs/ubifs/super.c
index 2c0803b0ac3a..8c1d571334bc 100644
--- a/fs/ubifs/super.c
+++ b/fs/ubifs/super.c
@@ -609,6 +609,10 @@ static int init_constants_early(struct ubifs_info *c)
609 c->max_bu_buf_len = UBIFS_MAX_BULK_READ * UBIFS_MAX_DATA_NODE_SZ; 609 c->max_bu_buf_len = UBIFS_MAX_BULK_READ * UBIFS_MAX_DATA_NODE_SZ;
610 if (c->max_bu_buf_len > c->leb_size) 610 if (c->max_bu_buf_len > c->leb_size)
611 c->max_bu_buf_len = c->leb_size; 611 c->max_bu_buf_len = c->leb_size;
612
613 /* Log is ready, preserve one LEB for commits. */
614 c->min_log_bytes = c->leb_size;
615
612 return 0; 616 return 0;
613} 617}
614 618
diff --git a/fs/userfaultfd.c b/fs/userfaultfd.c
index ccbdbd62f0d8..fe6d804a38dc 100644
--- a/fs/userfaultfd.c
+++ b/fs/userfaultfd.c
@@ -880,6 +880,7 @@ static int userfaultfd_release(struct inode *inode, struct file *file)
880 /* len == 0 means wake all */ 880 /* len == 0 means wake all */
881 struct userfaultfd_wake_range range = { .len = 0, }; 881 struct userfaultfd_wake_range range = { .len = 0, };
882 unsigned long new_flags; 882 unsigned long new_flags;
883 bool still_valid;
883 884
884 WRITE_ONCE(ctx->released, true); 885 WRITE_ONCE(ctx->released, true);
885 886
@@ -895,8 +896,7 @@ static int userfaultfd_release(struct inode *inode, struct file *file)
895 * taking the mmap_sem for writing. 896 * taking the mmap_sem for writing.
896 */ 897 */
897 down_write(&mm->mmap_sem); 898 down_write(&mm->mmap_sem);
898 if (!mmget_still_valid(mm)) 899 still_valid = mmget_still_valid(mm);
899 goto skip_mm;
900 prev = NULL; 900 prev = NULL;
901 for (vma = mm->mmap; vma; vma = vma->vm_next) { 901 for (vma = mm->mmap; vma; vma = vma->vm_next) {
902 cond_resched(); 902 cond_resched();
@@ -907,19 +907,20 @@ static int userfaultfd_release(struct inode *inode, struct file *file)
907 continue; 907 continue;
908 } 908 }
909 new_flags = vma->vm_flags & ~(VM_UFFD_MISSING | VM_UFFD_WP); 909 new_flags = vma->vm_flags & ~(VM_UFFD_MISSING | VM_UFFD_WP);
910 prev = vma_merge(mm, prev, vma->vm_start, vma->vm_end, 910 if (still_valid) {
911 new_flags, vma->anon_vma, 911 prev = vma_merge(mm, prev, vma->vm_start, vma->vm_end,
912 vma->vm_file, vma->vm_pgoff, 912 new_flags, vma->anon_vma,
913 vma_policy(vma), 913 vma->vm_file, vma->vm_pgoff,
914 NULL_VM_UFFD_CTX); 914 vma_policy(vma),
915 if (prev) 915 NULL_VM_UFFD_CTX);
916 vma = prev; 916 if (prev)
917 else 917 vma = prev;
918 prev = vma; 918 else
919 prev = vma;
920 }
919 vma->vm_flags = new_flags; 921 vma->vm_flags = new_flags;
920 vma->vm_userfaultfd_ctx = NULL_VM_UFFD_CTX; 922 vma->vm_userfaultfd_ctx = NULL_VM_UFFD_CTX;
921 } 923 }
922skip_mm:
923 up_write(&mm->mmap_sem); 924 up_write(&mm->mmap_sem);
924 mmput(mm); 925 mmput(mm);
925wakeup: 926wakeup:
diff --git a/fs/xfs/libxfs/xfs_bmap.c b/fs/xfs/libxfs/xfs_bmap.c
index baf0b72c0a37..07aad70f3931 100644
--- a/fs/xfs/libxfs/xfs_bmap.c
+++ b/fs/xfs/libxfs/xfs_bmap.c
@@ -3835,15 +3835,28 @@ xfs_bmapi_read(
3835 XFS_STATS_INC(mp, xs_blk_mapr); 3835 XFS_STATS_INC(mp, xs_blk_mapr);
3836 3836
3837 ifp = XFS_IFORK_PTR(ip, whichfork); 3837 ifp = XFS_IFORK_PTR(ip, whichfork);
3838 if (!ifp) {
3839 /* No CoW fork? Return a hole. */
3840 if (whichfork == XFS_COW_FORK) {
3841 mval->br_startoff = bno;
3842 mval->br_startblock = HOLESTARTBLOCK;
3843 mval->br_blockcount = len;
3844 mval->br_state = XFS_EXT_NORM;
3845 *nmap = 1;
3846 return 0;
3847 }
3838 3848
3839 /* No CoW fork? Return a hole. */ 3849 /*
3840 if (whichfork == XFS_COW_FORK && !ifp) { 3850 * A missing attr ifork implies that the inode says we're in
3841 mval->br_startoff = bno; 3851 * extents or btree format but failed to pass the inode fork
3842 mval->br_startblock = HOLESTARTBLOCK; 3852 * verifier while trying to load it. Treat that as a file
3843 mval->br_blockcount = len; 3853 * corruption too.
3844 mval->br_state = XFS_EXT_NORM; 3854 */
3845 *nmap = 1; 3855#ifdef DEBUG
3846 return 0; 3856 xfs_alert(mp, "%s: inode %llu missing fork %d",
3857 __func__, ip->i_ino, whichfork);
3858#endif /* DEBUG */
3859 return -EFSCORRUPTED;
3847 } 3860 }
3848 3861
3849 if (!(ifp->if_flags & XFS_IFEXTENTS)) { 3862 if (!(ifp->if_flags & XFS_IFEXTENTS)) {
diff --git a/fs/xfs/libxfs/xfs_da_btree.c b/fs/xfs/libxfs/xfs_da_btree.c
index d1c77fd0815d..0bf56e94bfe9 100644
--- a/fs/xfs/libxfs/xfs_da_btree.c
+++ b/fs/xfs/libxfs/xfs_da_btree.c
@@ -487,10 +487,8 @@ xfs_da3_split(
487 ASSERT(state->path.active == 0); 487 ASSERT(state->path.active == 0);
488 oldblk = &state->path.blk[0]; 488 oldblk = &state->path.blk[0];
489 error = xfs_da3_root_split(state, oldblk, addblk); 489 error = xfs_da3_root_split(state, oldblk, addblk);
490 if (error) { 490 if (error)
491 addblk->bp = NULL; 491 goto out;
492 return error; /* GROT: dir is inconsistent */
493 }
494 492
495 /* 493 /*
496 * Update pointers to the node which used to be block 0 and just got 494 * Update pointers to the node which used to be block 0 and just got
@@ -505,7 +503,10 @@ xfs_da3_split(
505 */ 503 */
506 node = oldblk->bp->b_addr; 504 node = oldblk->bp->b_addr;
507 if (node->hdr.info.forw) { 505 if (node->hdr.info.forw) {
508 ASSERT(be32_to_cpu(node->hdr.info.forw) == addblk->blkno); 506 if (be32_to_cpu(node->hdr.info.forw) != addblk->blkno) {
507 error = -EFSCORRUPTED;
508 goto out;
509 }
509 node = addblk->bp->b_addr; 510 node = addblk->bp->b_addr;
510 node->hdr.info.back = cpu_to_be32(oldblk->blkno); 511 node->hdr.info.back = cpu_to_be32(oldblk->blkno);
511 xfs_trans_log_buf(state->args->trans, addblk->bp, 512 xfs_trans_log_buf(state->args->trans, addblk->bp,
@@ -514,15 +515,19 @@ xfs_da3_split(
514 } 515 }
515 node = oldblk->bp->b_addr; 516 node = oldblk->bp->b_addr;
516 if (node->hdr.info.back) { 517 if (node->hdr.info.back) {
517 ASSERT(be32_to_cpu(node->hdr.info.back) == addblk->blkno); 518 if (be32_to_cpu(node->hdr.info.back) != addblk->blkno) {
519 error = -EFSCORRUPTED;
520 goto out;
521 }
518 node = addblk->bp->b_addr; 522 node = addblk->bp->b_addr;
519 node->hdr.info.forw = cpu_to_be32(oldblk->blkno); 523 node->hdr.info.forw = cpu_to_be32(oldblk->blkno);
520 xfs_trans_log_buf(state->args->trans, addblk->bp, 524 xfs_trans_log_buf(state->args->trans, addblk->bp,
521 XFS_DA_LOGRANGE(node, &node->hdr.info, 525 XFS_DA_LOGRANGE(node, &node->hdr.info,
522 sizeof(node->hdr.info))); 526 sizeof(node->hdr.info)));
523 } 527 }
528out:
524 addblk->bp = NULL; 529 addblk->bp = NULL;
525 return 0; 530 return error;
526} 531}
527 532
528/* 533/*
diff --git a/fs/xfs/libxfs/xfs_dir2_node.c b/fs/xfs/libxfs/xfs_dir2_node.c
index afcc6642690a..1fc44efc344d 100644
--- a/fs/xfs/libxfs/xfs_dir2_node.c
+++ b/fs/xfs/libxfs/xfs_dir2_node.c
@@ -741,7 +741,8 @@ xfs_dir2_leafn_lookup_for_entry(
741 ents = dp->d_ops->leaf_ents_p(leaf); 741 ents = dp->d_ops->leaf_ents_p(leaf);
742 742
743 xfs_dir3_leaf_check(dp, bp); 743 xfs_dir3_leaf_check(dp, bp);
744 ASSERT(leafhdr.count > 0); 744 if (leafhdr.count <= 0)
745 return -EFSCORRUPTED;
745 746
746 /* 747 /*
747 * Look up the hash value in the leaf entries. 748 * Look up the hash value in the leaf entries.
diff --git a/fs/xfs/scrub/dabtree.c b/fs/xfs/scrub/dabtree.c
index 94c4f1de1922..77ff9f97bcda 100644
--- a/fs/xfs/scrub/dabtree.c
+++ b/fs/xfs/scrub/dabtree.c
@@ -278,7 +278,11 @@ xchk_da_btree_block_check_sibling(
278 /* Compare upper level pointer to sibling pointer. */ 278 /* Compare upper level pointer to sibling pointer. */
279 if (ds->state->altpath.blk[level].blkno != sibling) 279 if (ds->state->altpath.blk[level].blkno != sibling)
280 xchk_da_set_corrupt(ds, level); 280 xchk_da_set_corrupt(ds, level);
281 xfs_trans_brelse(ds->dargs.trans, ds->state->altpath.blk[level].bp); 281 if (ds->state->altpath.blk[level].bp) {
282 xfs_trans_brelse(ds->dargs.trans,
283 ds->state->altpath.blk[level].bp);
284 ds->state->altpath.blk[level].bp = NULL;
285 }
282out: 286out:
283 return error; 287 return error;
284} 288}
diff --git a/fs/xfs/xfs_ioctl32.c b/fs/xfs/xfs_ioctl32.c
index 7fcf7569743f..7bd7534f5051 100644
--- a/fs/xfs/xfs_ioctl32.c
+++ b/fs/xfs/xfs_ioctl32.c
@@ -547,63 +547,12 @@ xfs_file_compat_ioctl(
547 struct inode *inode = file_inode(filp); 547 struct inode *inode = file_inode(filp);
548 struct xfs_inode *ip = XFS_I(inode); 548 struct xfs_inode *ip = XFS_I(inode);
549 struct xfs_mount *mp = ip->i_mount; 549 struct xfs_mount *mp = ip->i_mount;
550 void __user *arg = (void __user *)p; 550 void __user *arg = compat_ptr(p);
551 int error; 551 int error;
552 552
553 trace_xfs_file_compat_ioctl(ip); 553 trace_xfs_file_compat_ioctl(ip);
554 554
555 switch (cmd) { 555 switch (cmd) {
556 /* No size or alignment issues on any arch */
557 case XFS_IOC_DIOINFO:
558 case XFS_IOC_FSGEOMETRY_V4:
559 case XFS_IOC_FSGEOMETRY:
560 case XFS_IOC_AG_GEOMETRY:
561 case XFS_IOC_FSGETXATTR:
562 case XFS_IOC_FSSETXATTR:
563 case XFS_IOC_FSGETXATTRA:
564 case XFS_IOC_FSSETDM:
565 case XFS_IOC_GETBMAP:
566 case XFS_IOC_GETBMAPA:
567 case XFS_IOC_GETBMAPX:
568 case XFS_IOC_FSCOUNTS:
569 case XFS_IOC_SET_RESBLKS:
570 case XFS_IOC_GET_RESBLKS:
571 case XFS_IOC_FSGROWFSLOG:
572 case XFS_IOC_GOINGDOWN:
573 case XFS_IOC_ERROR_INJECTION:
574 case XFS_IOC_ERROR_CLEARALL:
575 case FS_IOC_GETFSMAP:
576 case XFS_IOC_SCRUB_METADATA:
577 case XFS_IOC_BULKSTAT:
578 case XFS_IOC_INUMBERS:
579 return xfs_file_ioctl(filp, cmd, p);
580#if !defined(BROKEN_X86_ALIGNMENT) || defined(CONFIG_X86_X32)
581 /*
582 * These are handled fine if no alignment issues. To support x32
583 * which uses native 64-bit alignment we must emit these cases in
584 * addition to the ia-32 compat set below.
585 */
586 case XFS_IOC_ALLOCSP:
587 case XFS_IOC_FREESP:
588 case XFS_IOC_RESVSP:
589 case XFS_IOC_UNRESVSP:
590 case XFS_IOC_ALLOCSP64:
591 case XFS_IOC_FREESP64:
592 case XFS_IOC_RESVSP64:
593 case XFS_IOC_UNRESVSP64:
594 case XFS_IOC_FSGEOMETRY_V1:
595 case XFS_IOC_FSGROWFSDATA:
596 case XFS_IOC_FSGROWFSRT:
597 case XFS_IOC_ZERO_RANGE:
598#ifdef CONFIG_X86_X32
599 /*
600 * x32 special: this gets a different cmd number from the ia-32 compat
601 * case below; the associated data will match native 64-bit alignment.
602 */
603 case XFS_IOC_SWAPEXT:
604#endif
605 return xfs_file_ioctl(filp, cmd, p);
606#endif
607#if defined(BROKEN_X86_ALIGNMENT) 556#if defined(BROKEN_X86_ALIGNMENT)
608 case XFS_IOC_ALLOCSP_32: 557 case XFS_IOC_ALLOCSP_32:
609 case XFS_IOC_FREESP_32: 558 case XFS_IOC_FREESP_32:
@@ -705,6 +654,7 @@ xfs_file_compat_ioctl(
705 case XFS_IOC_FSSETDM_BY_HANDLE_32: 654 case XFS_IOC_FSSETDM_BY_HANDLE_32:
706 return xfs_compat_fssetdm_by_handle(filp, arg); 655 return xfs_compat_fssetdm_by_handle(filp, arg);
707 default: 656 default:
708 return -ENOIOCTLCMD; 657 /* try the native version */
658 return xfs_file_ioctl(filp, cmd, (unsigned long)arg);
709 } 659 }
710} 660}
diff --git a/fs/xfs/xfs_iops.c b/fs/xfs/xfs_iops.c
index ff3c1fae5357..fe285d123d69 100644
--- a/fs/xfs/xfs_iops.c
+++ b/fs/xfs/xfs_iops.c
@@ -793,6 +793,7 @@ xfs_setattr_nonsize(
793 793
794out_cancel: 794out_cancel:
795 xfs_trans_cancel(tp); 795 xfs_trans_cancel(tp);
796 xfs_iunlock(ip, XFS_ILOCK_EXCL);
796out_dqrele: 797out_dqrele:
797 xfs_qm_dqrele(udqp); 798 xfs_qm_dqrele(udqp);
798 xfs_qm_dqrele(gdqp); 799 xfs_qm_dqrele(gdqp);
diff --git a/fs/xfs/xfs_itable.c b/fs/xfs/xfs_itable.c
index a8a06bb78ea8..f5c955d35be4 100644
--- a/fs/xfs/xfs_itable.c
+++ b/fs/xfs/xfs_itable.c
@@ -272,6 +272,7 @@ xfs_bulkstat_to_bstat(
272 struct xfs_bstat *bs1, 272 struct xfs_bstat *bs1,
273 const struct xfs_bulkstat *bstat) 273 const struct xfs_bulkstat *bstat)
274{ 274{
275 /* memset is needed here because of padding holes in the structure. */
275 memset(bs1, 0, sizeof(struct xfs_bstat)); 276 memset(bs1, 0, sizeof(struct xfs_bstat));
276 bs1->bs_ino = bstat->bs_ino; 277 bs1->bs_ino = bstat->bs_ino;
277 bs1->bs_mode = bstat->bs_mode; 278 bs1->bs_mode = bstat->bs_mode;
@@ -388,6 +389,8 @@ xfs_inumbers_to_inogrp(
388 struct xfs_inogrp *ig1, 389 struct xfs_inogrp *ig1,
389 const struct xfs_inumbers *ig) 390 const struct xfs_inumbers *ig)
390{ 391{
392 /* memset is needed here because of padding holes in the structure. */
393 memset(ig1, 0, sizeof(struct xfs_inogrp));
391 ig1->xi_startino = ig->xi_startino; 394 ig1->xi_startino = ig->xi_startino;
392 ig1->xi_alloccount = ig->xi_alloccount; 395 ig1->xi_alloccount = ig->xi_alloccount;
393 ig1->xi_allocmask = ig->xi_allocmask; 396 ig1->xi_allocmask = ig->xi_allocmask;
diff --git a/fs/xfs/xfs_log.c b/fs/xfs/xfs_log.c
index 00e9f5c388d3..7fc3c1ad36bc 100644
--- a/fs/xfs/xfs_log.c
+++ b/fs/xfs/xfs_log.c
@@ -429,10 +429,7 @@ xfs_log_reserve(
429 429
430 ASSERT(*ticp == NULL); 430 ASSERT(*ticp == NULL);
431 tic = xlog_ticket_alloc(log, unit_bytes, cnt, client, permanent, 431 tic = xlog_ticket_alloc(log, unit_bytes, cnt, client, permanent,
432 KM_SLEEP | KM_MAYFAIL); 432 KM_SLEEP);
433 if (!tic)
434 return -ENOMEM;
435
436 *ticp = tic; 433 *ticp = tic;
437 434
438 xlog_grant_push_ail(log, tic->t_cnt ? tic->t_unit_res * tic->t_cnt 435 xlog_grant_push_ail(log, tic->t_cnt ? tic->t_unit_res * tic->t_cnt
diff --git a/fs/xfs/xfs_pnfs.c b/fs/xfs/xfs_pnfs.c
index 0c954cad7449..a339bd5fa260 100644
--- a/fs/xfs/xfs_pnfs.c
+++ b/fs/xfs/xfs_pnfs.c
@@ -32,7 +32,7 @@ xfs_break_leased_layouts(
32 struct xfs_inode *ip = XFS_I(inode); 32 struct xfs_inode *ip = XFS_I(inode);
33 int error; 33 int error;
34 34
35 while ((error = break_layout(inode, false) == -EWOULDBLOCK)) { 35 while ((error = break_layout(inode, false)) == -EWOULDBLOCK) {
36 xfs_iunlock(ip, *iolock); 36 xfs_iunlock(ip, *iolock);
37 *did_unlock = true; 37 *did_unlock = true;
38 error = break_layout(inode, true); 38 error = break_layout(inode, true);
diff --git a/fs/xfs/xfs_reflink.c b/fs/xfs/xfs_reflink.c
index c4ec7afd1170..edbe37b7f636 100644
--- a/fs/xfs/xfs_reflink.c
+++ b/fs/xfs/xfs_reflink.c
@@ -1190,11 +1190,11 @@ xfs_reflink_remap_blocks(
1190} 1190}
1191 1191
1192/* 1192/*
1193 * Grab the exclusive iolock for a data copy from src to dest, making 1193 * Grab the exclusive iolock for a data copy from src to dest, making sure to
1194 * sure to abide vfs locking order (lowest pointer value goes first) and 1194 * abide vfs locking order (lowest pointer value goes first) and breaking the
1195 * breaking the pnfs layout leases on dest before proceeding. The loop 1195 * layout leases before proceeding. The loop is needed because we cannot call
1196 * is needed because we cannot call the blocking break_layout() with the 1196 * the blocking break_layout() with the iolocks held, and therefore have to
1197 * src iolock held, and therefore have to back out both locks. 1197 * back out both locks.
1198 */ 1198 */
1199static int 1199static int
1200xfs_iolock_two_inodes_and_break_layout( 1200xfs_iolock_two_inodes_and_break_layout(
@@ -1203,33 +1203,44 @@ xfs_iolock_two_inodes_and_break_layout(
1203{ 1203{
1204 int error; 1204 int error;
1205 1205
1206retry: 1206 if (src > dest)
1207 if (src < dest) { 1207 swap(src, dest);
1208 inode_lock_shared(src);
1209 inode_lock_nested(dest, I_MUTEX_NONDIR2);
1210 } else {
1211 /* src >= dest */
1212 inode_lock(dest);
1213 }
1214 1208
1215 error = break_layout(dest, false); 1209retry:
1216 if (error == -EWOULDBLOCK) { 1210 /* Wait to break both inodes' layouts before we start locking. */
1217 inode_unlock(dest); 1211 error = break_layout(src, true);
1218 if (src < dest) 1212 if (error)
1219 inode_unlock_shared(src); 1213 return error;
1214 if (src != dest) {
1220 error = break_layout(dest, true); 1215 error = break_layout(dest, true);
1221 if (error) 1216 if (error)
1222 return error; 1217 return error;
1223 goto retry;
1224 } 1218 }
1219
1220 /* Lock one inode and make sure nobody got in and leased it. */
1221 inode_lock(src);
1222 error = break_layout(src, false);
1225 if (error) { 1223 if (error) {
1224 inode_unlock(src);
1225 if (error == -EWOULDBLOCK)
1226 goto retry;
1227 return error;
1228 }
1229
1230 if (src == dest)
1231 return 0;
1232
1233 /* Lock the other inode and make sure nobody got in and leased it. */
1234 inode_lock_nested(dest, I_MUTEX_NONDIR2);
1235 error = break_layout(dest, false);
1236 if (error) {
1237 inode_unlock(src);
1226 inode_unlock(dest); 1238 inode_unlock(dest);
1227 if (src < dest) 1239 if (error == -EWOULDBLOCK)
1228 inode_unlock_shared(src); 1240 goto retry;
1229 return error; 1241 return error;
1230 } 1242 }
1231 if (src > dest) 1243
1232 inode_lock_shared_nested(src, I_MUTEX_NONDIR2);
1233 return 0; 1244 return 0;
1234} 1245}
1235 1246
@@ -1247,10 +1258,10 @@ xfs_reflink_remap_unlock(
1247 1258
1248 xfs_iunlock(dest, XFS_MMAPLOCK_EXCL); 1259 xfs_iunlock(dest, XFS_MMAPLOCK_EXCL);
1249 if (!same_inode) 1260 if (!same_inode)
1250 xfs_iunlock(src, XFS_MMAPLOCK_SHARED); 1261 xfs_iunlock(src, XFS_MMAPLOCK_EXCL);
1251 inode_unlock(inode_out); 1262 inode_unlock(inode_out);
1252 if (!same_inode) 1263 if (!same_inode)
1253 inode_unlock_shared(inode_in); 1264 inode_unlock(inode_in);
1254} 1265}
1255 1266
1256/* 1267/*
@@ -1325,7 +1336,7 @@ xfs_reflink_remap_prep(
1325 if (same_inode) 1336 if (same_inode)
1326 xfs_ilock(src, XFS_MMAPLOCK_EXCL); 1337 xfs_ilock(src, XFS_MMAPLOCK_EXCL);
1327 else 1338 else
1328 xfs_lock_two_inodes(src, XFS_MMAPLOCK_SHARED, dest, 1339 xfs_lock_two_inodes(src, XFS_MMAPLOCK_EXCL, dest,
1329 XFS_MMAPLOCK_EXCL); 1340 XFS_MMAPLOCK_EXCL);
1330 1341
1331 /* Check file eligibility and prepare for block sharing. */ 1342 /* Check file eligibility and prepare for block sharing. */
diff --git a/include/asm-generic/5level-fixup.h b/include/asm-generic/5level-fixup.h
index bb6cb347018c..f6947da70d71 100644
--- a/include/asm-generic/5level-fixup.h
+++ b/include/asm-generic/5level-fixup.h
@@ -19,9 +19,24 @@
19 19
20#define p4d_alloc(mm, pgd, address) (pgd) 20#define p4d_alloc(mm, pgd, address) (pgd)
21#define p4d_offset(pgd, start) (pgd) 21#define p4d_offset(pgd, start) (pgd)
22#define p4d_none(p4d) 0 22
23#define p4d_bad(p4d) 0 23#ifndef __ASSEMBLY__
24#define p4d_present(p4d) 1 24static inline int p4d_none(p4d_t p4d)
25{
26 return 0;
27}
28
29static inline int p4d_bad(p4d_t p4d)
30{
31 return 0;
32}
33
34static inline int p4d_present(p4d_t p4d)
35{
36 return 1;
37}
38#endif
39
25#define p4d_ERROR(p4d) do { } while (0) 40#define p4d_ERROR(p4d) do { } while (0)
26#define p4d_clear(p4d) pgd_clear(p4d) 41#define p4d_clear(p4d) pgd_clear(p4d)
27#define p4d_val(p4d) pgd_val(p4d) 42#define p4d_val(p4d) pgd_val(p4d)
diff --git a/include/asm-generic/futex.h b/include/asm-generic/futex.h
index 8666fe7f35d7..02970b11f71f 100644
--- a/include/asm-generic/futex.h
+++ b/include/asm-generic/futex.h
@@ -118,26 +118,7 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
118static inline int 118static inline int
119arch_futex_atomic_op_inuser(int op, u32 oparg, int *oval, u32 __user *uaddr) 119arch_futex_atomic_op_inuser(int op, u32 oparg, int *oval, u32 __user *uaddr)
120{ 120{
121 int oldval = 0, ret; 121 return -ENOSYS;
122
123 pagefault_disable();
124
125 switch (op) {
126 case FUTEX_OP_SET:
127 case FUTEX_OP_ADD:
128 case FUTEX_OP_OR:
129 case FUTEX_OP_ANDN:
130 case FUTEX_OP_XOR:
131 default:
132 ret = -ENOSYS;
133 }
134
135 pagefault_enable();
136
137 if (!ret)
138 *oval = oldval;
139
140 return ret;
141} 122}
142 123
143static inline int 124static inline int
diff --git a/include/asm-generic/getorder.h b/include/asm-generic/getorder.h
index c64bea7a52be..e9f20b813a69 100644
--- a/include/asm-generic/getorder.h
+++ b/include/asm-generic/getorder.h
@@ -7,24 +7,6 @@
7#include <linux/compiler.h> 7#include <linux/compiler.h>
8#include <linux/log2.h> 8#include <linux/log2.h>
9 9
10/*
11 * Runtime evaluation of get_order()
12 */
13static inline __attribute_const__
14int __get_order(unsigned long size)
15{
16 int order;
17
18 size--;
19 size >>= PAGE_SHIFT;
20#if BITS_PER_LONG == 32
21 order = fls(size);
22#else
23 order = fls64(size);
24#endif
25 return order;
26}
27
28/** 10/**
29 * get_order - Determine the allocation order of a memory size 11 * get_order - Determine the allocation order of a memory size
30 * @size: The size for which to get the order 12 * @size: The size for which to get the order
@@ -43,19 +25,27 @@ int __get_order(unsigned long size)
43 * to hold an object of the specified size. 25 * to hold an object of the specified size.
44 * 26 *
45 * The result is undefined if the size is 0. 27 * The result is undefined if the size is 0.
46 *
47 * This function may be used to initialise variables with compile time
48 * evaluations of constants.
49 */ 28 */
50#define get_order(n) \ 29static inline __attribute_const__ int get_order(unsigned long size)
51( \ 30{
52 __builtin_constant_p(n) ? ( \ 31 if (__builtin_constant_p(size)) {
53 ((n) == 0UL) ? BITS_PER_LONG - PAGE_SHIFT : \ 32 if (!size)
54 (((n) < (1UL << PAGE_SHIFT)) ? 0 : \ 33 return BITS_PER_LONG - PAGE_SHIFT;
55 ilog2((n) - 1) - PAGE_SHIFT + 1) \ 34
56 ) : \ 35 if (size < (1UL << PAGE_SHIFT))
57 __get_order(n) \ 36 return 0;
58) 37
38 return ilog2((size) - 1) - PAGE_SHIFT + 1;
39 }
40
41 size--;
42 size >>= PAGE_SHIFT;
43#if BITS_PER_LONG == 32
44 return fls(size);
45#else
46 return fls64(size);
47#endif
48}
59 49
60#endif /* __ASSEMBLY__ */ 50#endif /* __ASSEMBLY__ */
61 51
diff --git a/include/drm/drm_client.h b/include/drm/drm_client.h
index 72d51d1e9dd9..5cf2c5dd8b1e 100644
--- a/include/drm/drm_client.h
+++ b/include/drm/drm_client.h
@@ -149,6 +149,8 @@ struct drm_client_buffer {
149struct drm_client_buffer * 149struct drm_client_buffer *
150drm_client_framebuffer_create(struct drm_client_dev *client, u32 width, u32 height, u32 format); 150drm_client_framebuffer_create(struct drm_client_dev *client, u32 width, u32 height, u32 format);
151void drm_client_framebuffer_delete(struct drm_client_buffer *buffer); 151void drm_client_framebuffer_delete(struct drm_client_buffer *buffer);
152void *drm_client_buffer_vmap(struct drm_client_buffer *buffer);
153void drm_client_buffer_vunmap(struct drm_client_buffer *buffer);
152 154
153int drm_client_modeset_create(struct drm_client_dev *client); 155int drm_client_modeset_create(struct drm_client_dev *client);
154void drm_client_modeset_free(struct drm_client_dev *client); 156void drm_client_modeset_free(struct drm_client_dev *client);
diff --git a/include/drm/drm_mode_config.h b/include/drm/drm_mode_config.h
index 759d462d028b..f57eea0481e0 100644
--- a/include/drm/drm_mode_config.h
+++ b/include/drm/drm_mode_config.h
@@ -853,6 +853,13 @@ struct drm_mode_config {
853 uint32_t preferred_depth, prefer_shadow; 853 uint32_t preferred_depth, prefer_shadow;
854 854
855 /** 855 /**
856 * @prefer_shadow_fbdev:
857 *
858 * Hint to framebuffer emulation to prefer shadow-fb rendering.
859 */
860 bool prefer_shadow_fbdev;
861
862 /**
856 * @quirk_addfb_prefer_xbgr_30bpp: 863 * @quirk_addfb_prefer_xbgr_30bpp:
857 * 864 *
858 * Special hack for legacy ADDFB to keep nouveau userspace happy. Should 865 * Special hack for legacy ADDFB to keep nouveau userspace happy. Should
diff --git a/include/kvm/arm_pmu.h b/include/kvm/arm_pmu.h
index 16c769a7f979..6db030439e29 100644
--- a/include/kvm/arm_pmu.h
+++ b/include/kvm/arm_pmu.h
@@ -34,6 +34,7 @@ struct kvm_pmu {
34u64 kvm_pmu_get_counter_value(struct kvm_vcpu *vcpu, u64 select_idx); 34u64 kvm_pmu_get_counter_value(struct kvm_vcpu *vcpu, u64 select_idx);
35void kvm_pmu_set_counter_value(struct kvm_vcpu *vcpu, u64 select_idx, u64 val); 35void kvm_pmu_set_counter_value(struct kvm_vcpu *vcpu, u64 select_idx, u64 val);
36u64 kvm_pmu_valid_counter_mask(struct kvm_vcpu *vcpu); 36u64 kvm_pmu_valid_counter_mask(struct kvm_vcpu *vcpu);
37void kvm_pmu_vcpu_init(struct kvm_vcpu *vcpu);
37void kvm_pmu_vcpu_reset(struct kvm_vcpu *vcpu); 38void kvm_pmu_vcpu_reset(struct kvm_vcpu *vcpu);
38void kvm_pmu_vcpu_destroy(struct kvm_vcpu *vcpu); 39void kvm_pmu_vcpu_destroy(struct kvm_vcpu *vcpu);
39void kvm_pmu_disable_counter_mask(struct kvm_vcpu *vcpu, u64 val); 40void kvm_pmu_disable_counter_mask(struct kvm_vcpu *vcpu, u64 val);
@@ -71,6 +72,7 @@ static inline u64 kvm_pmu_valid_counter_mask(struct kvm_vcpu *vcpu)
71{ 72{
72 return 0; 73 return 0;
73} 74}
75static inline void kvm_pmu_vcpu_init(struct kvm_vcpu *vcpu) {}
74static inline void kvm_pmu_vcpu_reset(struct kvm_vcpu *vcpu) {} 76static inline void kvm_pmu_vcpu_reset(struct kvm_vcpu *vcpu) {}
75static inline void kvm_pmu_vcpu_destroy(struct kvm_vcpu *vcpu) {} 77static inline void kvm_pmu_vcpu_destroy(struct kvm_vcpu *vcpu) {}
76static inline void kvm_pmu_disable_counter_mask(struct kvm_vcpu *vcpu, u64 val) {} 78static inline void kvm_pmu_disable_counter_mask(struct kvm_vcpu *vcpu, u64 val) {}
diff --git a/include/kvm/arm_vgic.h b/include/kvm/arm_vgic.h
index 46bbc949c20a..7a30524a80ee 100644
--- a/include/kvm/arm_vgic.h
+++ b/include/kvm/arm_vgic.h
@@ -350,6 +350,7 @@ int kvm_vgic_vcpu_pending_irq(struct kvm_vcpu *vcpu);
350 350
351void kvm_vgic_load(struct kvm_vcpu *vcpu); 351void kvm_vgic_load(struct kvm_vcpu *vcpu);
352void kvm_vgic_put(struct kvm_vcpu *vcpu); 352void kvm_vgic_put(struct kvm_vcpu *vcpu);
353void kvm_vgic_vmcr_sync(struct kvm_vcpu *vcpu);
353 354
354#define irqchip_in_kernel(k) (!!((k)->arch.vgic.in_kernel)) 355#define irqchip_in_kernel(k) (!!((k)->arch.vgic.in_kernel))
355#define vgic_initialized(k) ((k)->arch.vgic.initialized) 356#define vgic_initialized(k) ((k)->arch.vgic.initialized)
diff --git a/include/linux/blk-cgroup.h b/include/linux/blk-cgroup.h
index 689a58231288..12811091fd50 100644
--- a/include/linux/blk-cgroup.h
+++ b/include/linux/blk-cgroup.h
@@ -181,6 +181,7 @@ struct blkcg_policy {
181 181
182extern struct blkcg blkcg_root; 182extern struct blkcg blkcg_root;
183extern struct cgroup_subsys_state * const blkcg_root_css; 183extern struct cgroup_subsys_state * const blkcg_root_css;
184extern bool blkcg_debug_stats;
184 185
185struct blkcg_gq *blkg_lookup_slowpath(struct blkcg *blkcg, 186struct blkcg_gq *blkg_lookup_slowpath(struct blkcg *blkcg,
186 struct request_queue *q, bool update_hint); 187 struct request_queue *q, bool update_hint);
diff --git a/include/linux/ccp.h b/include/linux/ccp.h
index 55cb455cfcb0..a5dfbaf2470d 100644
--- a/include/linux/ccp.h
+++ b/include/linux/ccp.h
@@ -170,6 +170,8 @@ struct ccp_aes_engine {
170 enum ccp_aes_mode mode; 170 enum ccp_aes_mode mode;
171 enum ccp_aes_action action; 171 enum ccp_aes_action action;
172 172
173 u32 authsize;
174
173 struct scatterlist *key; 175 struct scatterlist *key;
174 u32 key_len; /* In bytes */ 176 u32 key_len; /* In bytes */
175 177
diff --git a/include/linux/ceph/buffer.h b/include/linux/ceph/buffer.h
index 5e58bb29b1a3..11cdc7c60480 100644
--- a/include/linux/ceph/buffer.h
+++ b/include/linux/ceph/buffer.h
@@ -30,7 +30,8 @@ static inline struct ceph_buffer *ceph_buffer_get(struct ceph_buffer *b)
30 30
31static inline void ceph_buffer_put(struct ceph_buffer *b) 31static inline void ceph_buffer_put(struct ceph_buffer *b)
32{ 32{
33 kref_put(&b->kref, ceph_buffer_release); 33 if (b)
34 kref_put(&b->kref, ceph_buffer_release);
34} 35}
35 36
36extern int ceph_decode_buffer(struct ceph_buffer **b, void **p, void *end); 37extern int ceph_decode_buffer(struct ceph_buffer **b, void **p, void *end);
diff --git a/include/linux/clk.h b/include/linux/clk.h
index 3c096c7a51dc..853a8f181394 100644
--- a/include/linux/clk.h
+++ b/include/linux/clk.h
@@ -359,6 +359,7 @@ int __must_check devm_clk_bulk_get(struct device *dev, int num_clks,
359/** 359/**
360 * devm_clk_bulk_get_optional - managed get multiple optional consumer clocks 360 * devm_clk_bulk_get_optional - managed get multiple optional consumer clocks
361 * @dev: device for clock "consumer" 361 * @dev: device for clock "consumer"
362 * @num_clks: the number of clk_bulk_data
362 * @clks: pointer to the clk_bulk_data table of consumer 363 * @clks: pointer to the clk_bulk_data table of consumer
363 * 364 *
364 * Behaves the same as devm_clk_bulk_get() except where there is no clock 365 * Behaves the same as devm_clk_bulk_get() except where there is no clock
diff --git a/include/linux/connector.h b/include/linux/connector.h
index 6b6c7396a584..cb732643471b 100644
--- a/include/linux/connector.h
+++ b/include/linux/connector.h
@@ -50,7 +50,6 @@ struct cn_dev {
50 50
51 u32 seq, groups; 51 u32 seq, groups;
52 struct sock *nls; 52 struct sock *nls;
53 void (*input) (struct sk_buff *skb);
54 53
55 struct cn_queue_dev *cbdev; 54 struct cn_queue_dev *cbdev;
56}; 55};
diff --git a/include/linux/cred.h b/include/linux/cred.h
index 7eb43a038330..f7a30e0099be 100644
--- a/include/linux/cred.h
+++ b/include/linux/cred.h
@@ -145,7 +145,11 @@ struct cred {
145 struct user_struct *user; /* real user ID subscription */ 145 struct user_struct *user; /* real user ID subscription */
146 struct user_namespace *user_ns; /* user_ns the caps and keyrings are relative to. */ 146 struct user_namespace *user_ns; /* user_ns the caps and keyrings are relative to. */
147 struct group_info *group_info; /* supplementary groups for euid/fsgid */ 147 struct group_info *group_info; /* supplementary groups for euid/fsgid */
148 struct rcu_head rcu; /* RCU deletion hook */ 148 /* RCU deletion */
149 union {
150 int non_rcu; /* Can we skip RCU deletion? */
151 struct rcu_head rcu; /* RCU deletion hook */
152 };
149} __randomize_layout; 153} __randomize_layout;
150 154
151extern void __put_cred(struct cred *); 155extern void __put_cred(struct cred *);
@@ -246,6 +250,7 @@ static inline const struct cred *get_cred(const struct cred *cred)
246 if (!cred) 250 if (!cred)
247 return cred; 251 return cred;
248 validate_creds(cred); 252 validate_creds(cred);
253 nonconst_cred->non_rcu = 0;
249 return get_new_cred(nonconst_cred); 254 return get_new_cred(nonconst_cred);
250} 255}
251 256
@@ -257,6 +262,7 @@ static inline const struct cred *get_cred_rcu(const struct cred *cred)
257 if (!atomic_inc_not_zero(&nonconst_cred->usage)) 262 if (!atomic_inc_not_zero(&nonconst_cred->usage))
258 return NULL; 263 return NULL;
259 validate_creds(cred); 264 validate_creds(cred);
265 nonconst_cred->non_rcu = 0;
260 return cred; 266 return cred;
261} 267}
262 268
diff --git a/include/linux/device.h b/include/linux/device.h
index c330b75c6c57..6717adee33f0 100644
--- a/include/linux/device.h
+++ b/include/linux/device.h
@@ -915,6 +915,8 @@ struct dev_links_info {
915 * This identifies the device type and carries type-specific 915 * This identifies the device type and carries type-specific
916 * information. 916 * information.
917 * @mutex: Mutex to synchronize calls to its driver. 917 * @mutex: Mutex to synchronize calls to its driver.
918 * @lockdep_mutex: An optional debug lock that a subsystem can use as a
919 * peer lock to gain localized lockdep coverage of the device_lock.
918 * @bus: Type of bus device is on. 920 * @bus: Type of bus device is on.
919 * @driver: Which driver has allocated this 921 * @driver: Which driver has allocated this
920 * @platform_data: Platform data specific to the device. 922 * @platform_data: Platform data specific to the device.
@@ -998,6 +1000,9 @@ struct device {
998 core doesn't touch it */ 1000 core doesn't touch it */
999 void *driver_data; /* Driver data, set and get with 1001 void *driver_data; /* Driver data, set and get with
1000 dev_set_drvdata/dev_get_drvdata */ 1002 dev_set_drvdata/dev_get_drvdata */
1003#ifdef CONFIG_PROVE_LOCKING
1004 struct mutex lockdep_mutex;
1005#endif
1001 struct mutex mutex; /* mutex to synchronize calls to 1006 struct mutex mutex; /* mutex to synchronize calls to
1002 * its driver. 1007 * its driver.
1003 */ 1008 */
@@ -1383,6 +1388,7 @@ extern int (*platform_notify_remove)(struct device *dev);
1383 */ 1388 */
1384extern struct device *get_device(struct device *dev); 1389extern struct device *get_device(struct device *dev);
1385extern void put_device(struct device *dev); 1390extern void put_device(struct device *dev);
1391extern bool kill_device(struct device *dev);
1386 1392
1387#ifdef CONFIG_DEVTMPFS 1393#ifdef CONFIG_DEVTMPFS
1388extern int devtmpfs_create_node(struct device *dev); 1394extern int devtmpfs_create_node(struct device *dev);
diff --git a/include/linux/dim.h b/include/linux/dim.h
index d3a0fbfff2bb..9fa4b3f88c39 100644
--- a/include/linux/dim.h
+++ b/include/linux/dim.h
@@ -272,62 +272,6 @@ dim_update_sample_with_comps(u16 event_ctr, u64 packets, u64 bytes, u64 comps,
272 272
273/* Net DIM */ 273/* Net DIM */
274 274
275/*
276 * Net DIM profiles:
277 * There are different set of profiles for each CQ period mode.
278 * There are different set of profiles for RX/TX CQs.
279 * Each profile size must be of NET_DIM_PARAMS_NUM_PROFILES
280 */
281#define NET_DIM_PARAMS_NUM_PROFILES 5
282#define NET_DIM_DEFAULT_RX_CQ_MODERATION_PKTS_FROM_EQE 256
283#define NET_DIM_DEFAULT_TX_CQ_MODERATION_PKTS_FROM_EQE 128
284#define NET_DIM_DEF_PROFILE_CQE 1
285#define NET_DIM_DEF_PROFILE_EQE 1
286
287#define NET_DIM_RX_EQE_PROFILES { \
288 {1, NET_DIM_DEFAULT_RX_CQ_MODERATION_PKTS_FROM_EQE}, \
289 {8, NET_DIM_DEFAULT_RX_CQ_MODERATION_PKTS_FROM_EQE}, \
290 {64, NET_DIM_DEFAULT_RX_CQ_MODERATION_PKTS_FROM_EQE}, \
291 {128, NET_DIM_DEFAULT_RX_CQ_MODERATION_PKTS_FROM_EQE}, \
292 {256, NET_DIM_DEFAULT_RX_CQ_MODERATION_PKTS_FROM_EQE}, \
293}
294
295#define NET_DIM_RX_CQE_PROFILES { \
296 {2, 256}, \
297 {8, 128}, \
298 {16, 64}, \
299 {32, 64}, \
300 {64, 64} \
301}
302
303#define NET_DIM_TX_EQE_PROFILES { \
304 {1, NET_DIM_DEFAULT_TX_CQ_MODERATION_PKTS_FROM_EQE}, \
305 {8, NET_DIM_DEFAULT_TX_CQ_MODERATION_PKTS_FROM_EQE}, \
306 {32, NET_DIM_DEFAULT_TX_CQ_MODERATION_PKTS_FROM_EQE}, \
307 {64, NET_DIM_DEFAULT_TX_CQ_MODERATION_PKTS_FROM_EQE}, \
308 {128, NET_DIM_DEFAULT_TX_CQ_MODERATION_PKTS_FROM_EQE} \
309}
310
311#define NET_DIM_TX_CQE_PROFILES { \
312 {5, 128}, \
313 {8, 64}, \
314 {16, 32}, \
315 {32, 32}, \
316 {64, 32} \
317}
318
319static const struct dim_cq_moder
320rx_profile[DIM_CQ_PERIOD_NUM_MODES][NET_DIM_PARAMS_NUM_PROFILES] = {
321 NET_DIM_RX_EQE_PROFILES,
322 NET_DIM_RX_CQE_PROFILES,
323};
324
325static const struct dim_cq_moder
326tx_profile[DIM_CQ_PERIOD_NUM_MODES][NET_DIM_PARAMS_NUM_PROFILES] = {
327 NET_DIM_TX_EQE_PROFILES,
328 NET_DIM_TX_CQE_PROFILES,
329};
330
331/** 275/**
332 * net_dim_get_rx_moderation - provide a CQ moderation object for the given RX profile 276 * net_dim_get_rx_moderation - provide a CQ moderation object for the given RX profile
333 * @cq_period_mode: CQ period mode 277 * @cq_period_mode: CQ period mode
diff --git a/include/linux/dma-contiguous.h b/include/linux/dma-contiguous.h
index c05d4e661489..03f8e98e3bcc 100644
--- a/include/linux/dma-contiguous.h
+++ b/include/linux/dma-contiguous.h
@@ -160,10 +160,7 @@ bool dma_release_from_contiguous(struct device *dev, struct page *pages,
160static inline struct page *dma_alloc_contiguous(struct device *dev, size_t size, 160static inline struct page *dma_alloc_contiguous(struct device *dev, size_t size,
161 gfp_t gfp) 161 gfp_t gfp)
162{ 162{
163 int node = dev ? dev_to_node(dev) : NUMA_NO_NODE; 163 return NULL;
164 size_t align = get_order(PAGE_ALIGN(size));
165
166 return alloc_pages_node(node, gfp, align);
167} 164}
168 165
169static inline void dma_free_contiguous(struct device *dev, struct page *page, 166static inline void dma_free_contiguous(struct device *dev, struct page *page,
diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h
index e11b115dd0e4..f7d1eea32c78 100644
--- a/include/linux/dma-mapping.h
+++ b/include/linux/dma-mapping.h
@@ -689,8 +689,8 @@ static inline int dma_coerce_mask_and_coherent(struct device *dev, u64 mask)
689 */ 689 */
690static inline bool dma_addressing_limited(struct device *dev) 690static inline bool dma_addressing_limited(struct device *dev)
691{ 691{
692 return min_not_zero(*dev->dma_mask, dev->bus_dma_mask) < 692 return min_not_zero(dma_get_mask(dev), dev->bus_dma_mask) <
693 dma_get_required_mask(dev); 693 dma_get_required_mask(dev);
694} 694}
695 695
696#ifdef CONFIG_ARCH_HAS_SETUP_DMA_OPS 696#ifdef CONFIG_ARCH_HAS_SETUP_DMA_OPS
diff --git a/include/linux/dma-noncoherent.h b/include/linux/dma-noncoherent.h
index 3813211a9aad..0bff3d7fac92 100644
--- a/include/linux/dma-noncoherent.h
+++ b/include/linux/dma-noncoherent.h
@@ -42,13 +42,18 @@ void arch_dma_free(struct device *dev, size_t size, void *cpu_addr,
42 dma_addr_t dma_addr, unsigned long attrs); 42 dma_addr_t dma_addr, unsigned long attrs);
43long arch_dma_coherent_to_pfn(struct device *dev, void *cpu_addr, 43long arch_dma_coherent_to_pfn(struct device *dev, void *cpu_addr,
44 dma_addr_t dma_addr); 44 dma_addr_t dma_addr);
45
46#ifdef CONFIG_ARCH_HAS_DMA_MMAP_PGPROT
47pgprot_t arch_dma_mmap_pgprot(struct device *dev, pgprot_t prot, 45pgprot_t arch_dma_mmap_pgprot(struct device *dev, pgprot_t prot,
48 unsigned long attrs); 46 unsigned long attrs);
47
48#ifdef CONFIG_MMU
49pgprot_t dma_pgprot(struct device *dev, pgprot_t prot, unsigned long attrs);
49#else 50#else
50# define arch_dma_mmap_pgprot(dev, prot, attrs) pgprot_noncached(prot) 51static inline pgprot_t dma_pgprot(struct device *dev, pgprot_t prot,
51#endif 52 unsigned long attrs)
53{
54 return prot; /* no protection bits supported without page tables */
55}
56#endif /* CONFIG_MMU */
52 57
53#ifdef CONFIG_DMA_NONCOHERENT_CACHE_SYNC 58#ifdef CONFIG_DMA_NONCOHERENT_CACHE_SYNC
54void arch_dma_cache_sync(struct device *dev, void *vaddr, size_t size, 59void arch_dma_cache_sync(struct device *dev, void *vaddr, size_t size,
diff --git a/include/linux/elevator.h b/include/linux/elevator.h
index 17cd0078377c..1dd014c9c87b 100644
--- a/include/linux/elevator.h
+++ b/include/linux/elevator.h
@@ -45,7 +45,6 @@ struct elevator_mq_ops {
45 struct request *(*dispatch_request)(struct blk_mq_hw_ctx *); 45 struct request *(*dispatch_request)(struct blk_mq_hw_ctx *);
46 bool (*has_work)(struct blk_mq_hw_ctx *); 46 bool (*has_work)(struct blk_mq_hw_ctx *);
47 void (*completed_request)(struct request *, u64); 47 void (*completed_request)(struct request *, u64);
48 void (*started_request)(struct request *);
49 void (*requeue_request)(struct request *); 48 void (*requeue_request)(struct request *);
50 struct request *(*former_request)(struct request_queue *, struct request *); 49 struct request *(*former_request)(struct request_queue *, struct request *);
51 struct request *(*next_request)(struct request_queue *, struct request *); 50 struct request *(*next_request)(struct request_queue *, struct request *);
diff --git a/include/linux/filter.h b/include/linux/filter.h
index ff65d22cf336..92c6e31fb008 100644
--- a/include/linux/filter.h
+++ b/include/linux/filter.h
@@ -24,6 +24,7 @@
24 24
25#include <net/sch_generic.h> 25#include <net/sch_generic.h>
26 26
27#include <asm/byteorder.h>
27#include <uapi/linux/filter.h> 28#include <uapi/linux/filter.h>
28#include <uapi/linux/bpf.h> 29#include <uapi/linux/bpf.h>
29 30
@@ -747,6 +748,18 @@ bpf_ctx_narrow_access_ok(u32 off, u32 size, u32 size_default)
747 return size <= size_default && (size & (size - 1)) == 0; 748 return size <= size_default && (size & (size - 1)) == 0;
748} 749}
749 750
751static inline u8
752bpf_ctx_narrow_load_shift(u32 off, u32 size, u32 size_default)
753{
754 u8 load_off = off & (size_default - 1);
755
756#ifdef __LITTLE_ENDIAN
757 return load_off * 8;
758#else
759 return (size_default - (load_off + size)) * 8;
760#endif
761}
762
750#define bpf_ctx_wide_access_ok(off, size, type, field) \ 763#define bpf_ctx_wide_access_ok(off, size, type, field) \
751 (size == sizeof(__u64) && \ 764 (size == sizeof(__u64) && \
752 off >= offsetof(type, field) && \ 765 off >= offsetof(type, field) && \
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 56b8e358af5c..997a530ff4e9 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -2598,6 +2598,12 @@ extern struct block_device *blkdev_get_by_path(const char *path, fmode_t mode,
2598 void *holder); 2598 void *holder);
2599extern struct block_device *blkdev_get_by_dev(dev_t dev, fmode_t mode, 2599extern struct block_device *blkdev_get_by_dev(dev_t dev, fmode_t mode,
2600 void *holder); 2600 void *holder);
2601extern struct block_device *bd_start_claiming(struct block_device *bdev,
2602 void *holder);
2603extern void bd_finish_claiming(struct block_device *bdev,
2604 struct block_device *whole, void *holder);
2605extern void bd_abort_claiming(struct block_device *bdev,
2606 struct block_device *whole, void *holder);
2601extern void blkdev_put(struct block_device *bdev, fmode_t mode); 2607extern void blkdev_put(struct block_device *bdev, fmode_t mode);
2602extern int __blkdev_reread_part(struct block_device *bdev); 2608extern int __blkdev_reread_part(struct block_device *bdev);
2603extern int blkdev_reread_part(struct block_device *bdev); 2609extern int blkdev_reread_part(struct block_device *bdev);
diff --git a/include/linux/gfp.h b/include/linux/gfp.h
index fb07b503dc45..f33881688f42 100644
--- a/include/linux/gfp.h
+++ b/include/linux/gfp.h
@@ -510,22 +510,18 @@ alloc_pages(gfp_t gfp_mask, unsigned int order)
510} 510}
511extern struct page *alloc_pages_vma(gfp_t gfp_mask, int order, 511extern struct page *alloc_pages_vma(gfp_t gfp_mask, int order,
512 struct vm_area_struct *vma, unsigned long addr, 512 struct vm_area_struct *vma, unsigned long addr,
513 int node, bool hugepage); 513 int node);
514#define alloc_hugepage_vma(gfp_mask, vma, addr, order) \
515 alloc_pages_vma(gfp_mask, order, vma, addr, numa_node_id(), true)
516#else 514#else
517#define alloc_pages(gfp_mask, order) \ 515#define alloc_pages(gfp_mask, order) \
518 alloc_pages_node(numa_node_id(), gfp_mask, order) 516 alloc_pages_node(numa_node_id(), gfp_mask, order)
519#define alloc_pages_vma(gfp_mask, order, vma, addr, node, false)\ 517#define alloc_pages_vma(gfp_mask, order, vma, addr, node)\
520 alloc_pages(gfp_mask, order)
521#define alloc_hugepage_vma(gfp_mask, vma, addr, order) \
522 alloc_pages(gfp_mask, order) 518 alloc_pages(gfp_mask, order)
523#endif 519#endif
524#define alloc_page(gfp_mask) alloc_pages(gfp_mask, 0) 520#define alloc_page(gfp_mask) alloc_pages(gfp_mask, 0)
525#define alloc_page_vma(gfp_mask, vma, addr) \ 521#define alloc_page_vma(gfp_mask, vma, addr) \
526 alloc_pages_vma(gfp_mask, 0, vma, addr, numa_node_id(), false) 522 alloc_pages_vma(gfp_mask, 0, vma, addr, numa_node_id())
527#define alloc_page_vma_node(gfp_mask, vma, addr, node) \ 523#define alloc_page_vma_node(gfp_mask, vma, addr, node) \
528 alloc_pages_vma(gfp_mask, 0, vma, addr, node, false) 524 alloc_pages_vma(gfp_mask, 0, vma, addr, node)
529 525
530extern unsigned long __get_free_pages(gfp_t gfp_mask, unsigned int order); 526extern unsigned long __get_free_pages(gfp_t gfp_mask, unsigned int order);
531extern unsigned long get_zeroed_page(gfp_t gfp_mask); 527extern unsigned long get_zeroed_page(gfp_t gfp_mask);
diff --git a/include/linux/gpio.h b/include/linux/gpio.h
index 40915b461f18..f757a58191a6 100644
--- a/include/linux/gpio.h
+++ b/include/linux/gpio.h
@@ -241,30 +241,6 @@ static inline int irq_to_gpio(unsigned irq)
241 return -EINVAL; 241 return -EINVAL;
242} 242}
243 243
244static inline int
245gpiochip_add_pin_range(struct gpio_chip *chip, const char *pinctl_name,
246 unsigned int gpio_offset, unsigned int pin_offset,
247 unsigned int npins)
248{
249 WARN_ON(1);
250 return -EINVAL;
251}
252
253static inline int
254gpiochip_add_pingroup_range(struct gpio_chip *chip,
255 struct pinctrl_dev *pctldev,
256 unsigned int gpio_offset, const char *pin_group)
257{
258 WARN_ON(1);
259 return -EINVAL;
260}
261
262static inline void
263gpiochip_remove_pin_ranges(struct gpio_chip *chip)
264{
265 WARN_ON(1);
266}
267
268static inline int devm_gpio_request(struct device *dev, unsigned gpio, 244static inline int devm_gpio_request(struct device *dev, unsigned gpio,
269 const char *label) 245 const char *label)
270{ 246{
diff --git a/include/linux/gpio/consumer.h b/include/linux/gpio/consumer.h
index 9ddcf50a3c59..a7f08fb0f865 100644
--- a/include/linux/gpio/consumer.h
+++ b/include/linux/gpio/consumer.h
@@ -247,7 +247,7 @@ static inline void gpiod_put(struct gpio_desc *desc)
247 might_sleep(); 247 might_sleep();
248 248
249 /* GPIO can never have been requested */ 249 /* GPIO can never have been requested */
250 WARN_ON(1); 250 WARN_ON(desc);
251} 251}
252 252
253static inline void devm_gpiod_unhinge(struct device *dev, 253static inline void devm_gpiod_unhinge(struct device *dev,
@@ -256,7 +256,7 @@ static inline void devm_gpiod_unhinge(struct device *dev,
256 might_sleep(); 256 might_sleep();
257 257
258 /* GPIO can never have been requested */ 258 /* GPIO can never have been requested */
259 WARN_ON(1); 259 WARN_ON(desc);
260} 260}
261 261
262static inline void gpiod_put_array(struct gpio_descs *descs) 262static inline void gpiod_put_array(struct gpio_descs *descs)
@@ -264,7 +264,7 @@ static inline void gpiod_put_array(struct gpio_descs *descs)
264 might_sleep(); 264 might_sleep();
265 265
266 /* GPIO can never have been requested */ 266 /* GPIO can never have been requested */
267 WARN_ON(1); 267 WARN_ON(descs);
268} 268}
269 269
270static inline struct gpio_desc *__must_check 270static inline struct gpio_desc *__must_check
@@ -317,7 +317,7 @@ static inline void devm_gpiod_put(struct device *dev, struct gpio_desc *desc)
317 might_sleep(); 317 might_sleep();
318 318
319 /* GPIO can never have been requested */ 319 /* GPIO can never have been requested */
320 WARN_ON(1); 320 WARN_ON(desc);
321} 321}
322 322
323static inline void devm_gpiod_put_array(struct device *dev, 323static inline void devm_gpiod_put_array(struct device *dev,
@@ -326,32 +326,32 @@ static inline void devm_gpiod_put_array(struct device *dev,
326 might_sleep(); 326 might_sleep();
327 327
328 /* GPIO can never have been requested */ 328 /* GPIO can never have been requested */
329 WARN_ON(1); 329 WARN_ON(descs);
330} 330}
331 331
332 332
333static inline int gpiod_get_direction(const struct gpio_desc *desc) 333static inline int gpiod_get_direction(const struct gpio_desc *desc)
334{ 334{
335 /* GPIO can never have been requested */ 335 /* GPIO can never have been requested */
336 WARN_ON(1); 336 WARN_ON(desc);
337 return -ENOSYS; 337 return -ENOSYS;
338} 338}
339static inline int gpiod_direction_input(struct gpio_desc *desc) 339static inline int gpiod_direction_input(struct gpio_desc *desc)
340{ 340{
341 /* GPIO can never have been requested */ 341 /* GPIO can never have been requested */
342 WARN_ON(1); 342 WARN_ON(desc);
343 return -ENOSYS; 343 return -ENOSYS;
344} 344}
345static inline int gpiod_direction_output(struct gpio_desc *desc, int value) 345static inline int gpiod_direction_output(struct gpio_desc *desc, int value)
346{ 346{
347 /* GPIO can never have been requested */ 347 /* GPIO can never have been requested */
348 WARN_ON(1); 348 WARN_ON(desc);
349 return -ENOSYS; 349 return -ENOSYS;
350} 350}
351static inline int gpiod_direction_output_raw(struct gpio_desc *desc, int value) 351static inline int gpiod_direction_output_raw(struct gpio_desc *desc, int value)
352{ 352{
353 /* GPIO can never have been requested */ 353 /* GPIO can never have been requested */
354 WARN_ON(1); 354 WARN_ON(desc);
355 return -ENOSYS; 355 return -ENOSYS;
356} 356}
357 357
@@ -359,7 +359,7 @@ static inline int gpiod_direction_output_raw(struct gpio_desc *desc, int value)
359static inline int gpiod_get_value(const struct gpio_desc *desc) 359static inline int gpiod_get_value(const struct gpio_desc *desc)
360{ 360{
361 /* GPIO can never have been requested */ 361 /* GPIO can never have been requested */
362 WARN_ON(1); 362 WARN_ON(desc);
363 return 0; 363 return 0;
364} 364}
365static inline int gpiod_get_array_value(unsigned int array_size, 365static inline int gpiod_get_array_value(unsigned int array_size,
@@ -368,13 +368,13 @@ static inline int gpiod_get_array_value(unsigned int array_size,
368 unsigned long *value_bitmap) 368 unsigned long *value_bitmap)
369{ 369{
370 /* GPIO can never have been requested */ 370 /* GPIO can never have been requested */
371 WARN_ON(1); 371 WARN_ON(desc_array);
372 return 0; 372 return 0;
373} 373}
374static inline void gpiod_set_value(struct gpio_desc *desc, int value) 374static inline void gpiod_set_value(struct gpio_desc *desc, int value)
375{ 375{
376 /* GPIO can never have been requested */ 376 /* GPIO can never have been requested */
377 WARN_ON(1); 377 WARN_ON(desc);
378} 378}
379static inline int gpiod_set_array_value(unsigned int array_size, 379static inline int gpiod_set_array_value(unsigned int array_size,
380 struct gpio_desc **desc_array, 380 struct gpio_desc **desc_array,
@@ -382,13 +382,13 @@ static inline int gpiod_set_array_value(unsigned int array_size,
382 unsigned long *value_bitmap) 382 unsigned long *value_bitmap)
383{ 383{
384 /* GPIO can never have been requested */ 384 /* GPIO can never have been requested */
385 WARN_ON(1); 385 WARN_ON(desc_array);
386 return 0; 386 return 0;
387} 387}
388static inline int gpiod_get_raw_value(const struct gpio_desc *desc) 388static inline int gpiod_get_raw_value(const struct gpio_desc *desc)
389{ 389{
390 /* GPIO can never have been requested */ 390 /* GPIO can never have been requested */
391 WARN_ON(1); 391 WARN_ON(desc);
392 return 0; 392 return 0;
393} 393}
394static inline int gpiod_get_raw_array_value(unsigned int array_size, 394static inline int gpiod_get_raw_array_value(unsigned int array_size,
@@ -397,13 +397,13 @@ static inline int gpiod_get_raw_array_value(unsigned int array_size,
397 unsigned long *value_bitmap) 397 unsigned long *value_bitmap)
398{ 398{
399 /* GPIO can never have been requested */ 399 /* GPIO can never have been requested */
400 WARN_ON(1); 400 WARN_ON(desc_array);
401 return 0; 401 return 0;
402} 402}
403static inline void gpiod_set_raw_value(struct gpio_desc *desc, int value) 403static inline void gpiod_set_raw_value(struct gpio_desc *desc, int value)
404{ 404{
405 /* GPIO can never have been requested */ 405 /* GPIO can never have been requested */
406 WARN_ON(1); 406 WARN_ON(desc);
407} 407}
408static inline int gpiod_set_raw_array_value(unsigned int array_size, 408static inline int gpiod_set_raw_array_value(unsigned int array_size,
409 struct gpio_desc **desc_array, 409 struct gpio_desc **desc_array,
@@ -411,14 +411,14 @@ static inline int gpiod_set_raw_array_value(unsigned int array_size,
411 unsigned long *value_bitmap) 411 unsigned long *value_bitmap)
412{ 412{
413 /* GPIO can never have been requested */ 413 /* GPIO can never have been requested */
414 WARN_ON(1); 414 WARN_ON(desc_array);
415 return 0; 415 return 0;
416} 416}
417 417
418static inline int gpiod_get_value_cansleep(const struct gpio_desc *desc) 418static inline int gpiod_get_value_cansleep(const struct gpio_desc *desc)
419{ 419{
420 /* GPIO can never have been requested */ 420 /* GPIO can never have been requested */
421 WARN_ON(1); 421 WARN_ON(desc);
422 return 0; 422 return 0;
423} 423}
424static inline int gpiod_get_array_value_cansleep(unsigned int array_size, 424static inline int gpiod_get_array_value_cansleep(unsigned int array_size,
@@ -427,13 +427,13 @@ static inline int gpiod_get_array_value_cansleep(unsigned int array_size,
427 unsigned long *value_bitmap) 427 unsigned long *value_bitmap)
428{ 428{
429 /* GPIO can never have been requested */ 429 /* GPIO can never have been requested */
430 WARN_ON(1); 430 WARN_ON(desc_array);
431 return 0; 431 return 0;
432} 432}
433static inline void gpiod_set_value_cansleep(struct gpio_desc *desc, int value) 433static inline void gpiod_set_value_cansleep(struct gpio_desc *desc, int value)
434{ 434{
435 /* GPIO can never have been requested */ 435 /* GPIO can never have been requested */
436 WARN_ON(1); 436 WARN_ON(desc);
437} 437}
438static inline int gpiod_set_array_value_cansleep(unsigned int array_size, 438static inline int gpiod_set_array_value_cansleep(unsigned int array_size,
439 struct gpio_desc **desc_array, 439 struct gpio_desc **desc_array,
@@ -441,13 +441,13 @@ static inline int gpiod_set_array_value_cansleep(unsigned int array_size,
441 unsigned long *value_bitmap) 441 unsigned long *value_bitmap)
442{ 442{
443 /* GPIO can never have been requested */ 443 /* GPIO can never have been requested */
444 WARN_ON(1); 444 WARN_ON(desc_array);
445 return 0; 445 return 0;
446} 446}
447static inline int gpiod_get_raw_value_cansleep(const struct gpio_desc *desc) 447static inline int gpiod_get_raw_value_cansleep(const struct gpio_desc *desc)
448{ 448{
449 /* GPIO can never have been requested */ 449 /* GPIO can never have been requested */
450 WARN_ON(1); 450 WARN_ON(desc);
451 return 0; 451 return 0;
452} 452}
453static inline int gpiod_get_raw_array_value_cansleep(unsigned int array_size, 453static inline int gpiod_get_raw_array_value_cansleep(unsigned int array_size,
@@ -456,14 +456,14 @@ static inline int gpiod_get_raw_array_value_cansleep(unsigned int array_size,
456 unsigned long *value_bitmap) 456 unsigned long *value_bitmap)
457{ 457{
458 /* GPIO can never have been requested */ 458 /* GPIO can never have been requested */
459 WARN_ON(1); 459 WARN_ON(desc_array);
460 return 0; 460 return 0;
461} 461}
462static inline void gpiod_set_raw_value_cansleep(struct gpio_desc *desc, 462static inline void gpiod_set_raw_value_cansleep(struct gpio_desc *desc,
463 int value) 463 int value)
464{ 464{
465 /* GPIO can never have been requested */ 465 /* GPIO can never have been requested */
466 WARN_ON(1); 466 WARN_ON(desc);
467} 467}
468static inline int gpiod_set_raw_array_value_cansleep(unsigned int array_size, 468static inline int gpiod_set_raw_array_value_cansleep(unsigned int array_size,
469 struct gpio_desc **desc_array, 469 struct gpio_desc **desc_array,
@@ -471,41 +471,41 @@ static inline int gpiod_set_raw_array_value_cansleep(unsigned int array_size,
471 unsigned long *value_bitmap) 471 unsigned long *value_bitmap)
472{ 472{
473 /* GPIO can never have been requested */ 473 /* GPIO can never have been requested */
474 WARN_ON(1); 474 WARN_ON(desc_array);
475 return 0; 475 return 0;
476} 476}
477 477
478static inline int gpiod_set_debounce(struct gpio_desc *desc, unsigned debounce) 478static inline int gpiod_set_debounce(struct gpio_desc *desc, unsigned debounce)
479{ 479{
480 /* GPIO can never have been requested */ 480 /* GPIO can never have been requested */
481 WARN_ON(1); 481 WARN_ON(desc);
482 return -ENOSYS; 482 return -ENOSYS;
483} 483}
484 484
485static inline int gpiod_set_transitory(struct gpio_desc *desc, bool transitory) 485static inline int gpiod_set_transitory(struct gpio_desc *desc, bool transitory)
486{ 486{
487 /* GPIO can never have been requested */ 487 /* GPIO can never have been requested */
488 WARN_ON(1); 488 WARN_ON(desc);
489 return -ENOSYS; 489 return -ENOSYS;
490} 490}
491 491
492static inline int gpiod_is_active_low(const struct gpio_desc *desc) 492static inline int gpiod_is_active_low(const struct gpio_desc *desc)
493{ 493{
494 /* GPIO can never have been requested */ 494 /* GPIO can never have been requested */
495 WARN_ON(1); 495 WARN_ON(desc);
496 return 0; 496 return 0;
497} 497}
498static inline int gpiod_cansleep(const struct gpio_desc *desc) 498static inline int gpiod_cansleep(const struct gpio_desc *desc)
499{ 499{
500 /* GPIO can never have been requested */ 500 /* GPIO can never have been requested */
501 WARN_ON(1); 501 WARN_ON(desc);
502 return 0; 502 return 0;
503} 503}
504 504
505static inline int gpiod_to_irq(const struct gpio_desc *desc) 505static inline int gpiod_to_irq(const struct gpio_desc *desc)
506{ 506{
507 /* GPIO can never have been requested */ 507 /* GPIO can never have been requested */
508 WARN_ON(1); 508 WARN_ON(desc);
509 return -EINVAL; 509 return -EINVAL;
510} 510}
511 511
@@ -513,7 +513,7 @@ static inline int gpiod_set_consumer_name(struct gpio_desc *desc,
513 const char *name) 513 const char *name)
514{ 514{
515 /* GPIO can never have been requested */ 515 /* GPIO can never have been requested */
516 WARN_ON(1); 516 WARN_ON(desc);
517 return -EINVAL; 517 return -EINVAL;
518} 518}
519 519
@@ -525,7 +525,7 @@ static inline struct gpio_desc *gpio_to_desc(unsigned gpio)
525static inline int desc_to_gpio(const struct gpio_desc *desc) 525static inline int desc_to_gpio(const struct gpio_desc *desc)
526{ 526{
527 /* GPIO can never have been requested */ 527 /* GPIO can never have been requested */
528 WARN_ON(1); 528 WARN_ON(desc);
529 return -EINVAL; 529 return -EINVAL;
530} 530}
531 531
diff --git a/include/linux/hmm.h b/include/linux/hmm.h
index b8a08b2a10ca..7ef56dc18050 100644
--- a/include/linux/hmm.h
+++ b/include/linux/hmm.h
@@ -484,60 +484,6 @@ long hmm_range_dma_unmap(struct hmm_range *range,
484 */ 484 */
485#define HMM_RANGE_DEFAULT_TIMEOUT 1000 485#define HMM_RANGE_DEFAULT_TIMEOUT 1000
486 486
487/* This is a temporary helper to avoid merge conflict between trees. */
488static inline bool hmm_vma_range_done(struct hmm_range *range)
489{
490 bool ret = hmm_range_valid(range);
491
492 hmm_range_unregister(range);
493 return ret;
494}
495
496/* This is a temporary helper to avoid merge conflict between trees. */
497static inline int hmm_vma_fault(struct hmm_mirror *mirror,
498 struct hmm_range *range, bool block)
499{
500 long ret;
501
502 /*
503 * With the old API the driver must set each individual entries with
504 * the requested flags (valid, write, ...). So here we set the mask to
505 * keep intact the entries provided by the driver and zero out the
506 * default_flags.
507 */
508 range->default_flags = 0;
509 range->pfn_flags_mask = -1UL;
510
511 ret = hmm_range_register(range, mirror,
512 range->start, range->end,
513 PAGE_SHIFT);
514 if (ret)
515 return (int)ret;
516
517 if (!hmm_range_wait_until_valid(range, HMM_RANGE_DEFAULT_TIMEOUT)) {
518 /*
519 * The mmap_sem was taken by driver we release it here and
520 * returns -EAGAIN which correspond to mmap_sem have been
521 * drop in the old API.
522 */
523 up_read(&range->vma->vm_mm->mmap_sem);
524 return -EAGAIN;
525 }
526
527 ret = hmm_range_fault(range, block);
528 if (ret <= 0) {
529 if (ret == -EBUSY || !ret) {
530 /* Same as above, drop mmap_sem to match old API. */
531 up_read(&range->vma->vm_mm->mmap_sem);
532 ret = -EBUSY;
533 } else if (ret == -EAGAIN)
534 ret = -EBUSY;
535 hmm_range_unregister(range);
536 return ret;
537 }
538 return 0;
539}
540
541/* Below are for HMM internal use only! Not to be used by device driver! */ 487/* Below are for HMM internal use only! Not to be used by device driver! */
542static inline void hmm_mm_init(struct mm_struct *mm) 488static inline void hmm_mm_init(struct mm_struct *mm)
543{ 489{
diff --git a/include/linux/if_pppox.h b/include/linux/if_pppox.h
index 8b728750a625..69e813bcb947 100644
--- a/include/linux/if_pppox.h
+++ b/include/linux/if_pppox.h
@@ -80,6 +80,9 @@ extern int register_pppox_proto(int proto_num, const struct pppox_proto *pp);
80extern void unregister_pppox_proto(int proto_num); 80extern void unregister_pppox_proto(int proto_num);
81extern void pppox_unbind_sock(struct sock *sk);/* delete ppp-channel binding */ 81extern void pppox_unbind_sock(struct sock *sk);/* delete ppp-channel binding */
82extern int pppox_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg); 82extern int pppox_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg);
83extern int pppox_compat_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg);
84
85#define PPPOEIOCSFWD32 _IOW(0xB1 ,0, compat_size_t)
83 86
84/* PPPoX socket states */ 87/* PPPoX socket states */
85enum { 88enum {
diff --git a/include/linux/if_rmnet.h b/include/linux/if_rmnet.h
index b4f5403383fc..9661416a9bb4 100644
--- a/include/linux/if_rmnet.h
+++ b/include/linux/if_rmnet.h
@@ -41,11 +41,11 @@ struct rmnet_map_ul_csum_header {
41 __be16 csum_start_offset; 41 __be16 csum_start_offset;
42#if defined(__LITTLE_ENDIAN_BITFIELD) 42#if defined(__LITTLE_ENDIAN_BITFIELD)
43 u16 csum_insert_offset:14; 43 u16 csum_insert_offset:14;
44 u16 udp_ip4_ind:1; 44 u16 udp_ind:1;
45 u16 csum_enabled:1; 45 u16 csum_enabled:1;
46#elif defined (__BIG_ENDIAN_BITFIELD) 46#elif defined (__BIG_ENDIAN_BITFIELD)
47 u16 csum_enabled:1; 47 u16 csum_enabled:1;
48 u16 udp_ip4_ind:1; 48 u16 udp_ind:1;
49 u16 csum_insert_offset:14; 49 u16 csum_insert_offset:14;
50#else 50#else
51#error "Please fix <asm/byteorder.h>" 51#error "Please fix <asm/byteorder.h>"
diff --git a/include/linux/iova.h b/include/linux/iova.h
index 781b96ac706f..a0637abffee8 100644
--- a/include/linux/iova.h
+++ b/include/linux/iova.h
@@ -155,6 +155,7 @@ struct iova *reserve_iova(struct iova_domain *iovad, unsigned long pfn_lo,
155void copy_reserved_iova(struct iova_domain *from, struct iova_domain *to); 155void copy_reserved_iova(struct iova_domain *from, struct iova_domain *to);
156void init_iova_domain(struct iova_domain *iovad, unsigned long granule, 156void init_iova_domain(struct iova_domain *iovad, unsigned long granule,
157 unsigned long start_pfn); 157 unsigned long start_pfn);
158bool has_iova_flush_queue(struct iova_domain *iovad);
158int init_iova_flush_queue(struct iova_domain *iovad, 159int init_iova_flush_queue(struct iova_domain *iovad,
159 iova_flush_cb flush_cb, iova_entry_dtor entry_dtor); 160 iova_flush_cb flush_cb, iova_entry_dtor entry_dtor);
160struct iova *find_iova(struct iova_domain *iovad, unsigned long pfn); 161struct iova *find_iova(struct iova_domain *iovad, unsigned long pfn);
@@ -235,6 +236,11 @@ static inline void init_iova_domain(struct iova_domain *iovad,
235{ 236{
236} 237}
237 238
239static inline bool has_iova_flush_queue(struct iova_domain *iovad)
240{
241 return false;
242}
243
238static inline int init_iova_flush_queue(struct iova_domain *iovad, 244static inline int init_iova_flush_queue(struct iova_domain *iovad,
239 iova_flush_cb flush_cb, 245 iova_flush_cb flush_cb,
240 iova_entry_dtor entry_dtor) 246 iova_entry_dtor entry_dtor)
diff --git a/include/linux/key.h b/include/linux/key.h
index 91f391cd272e..50028338a4cc 100644
--- a/include/linux/key.h
+++ b/include/linux/key.h
@@ -94,11 +94,11 @@ struct keyring_index_key {
94 union { 94 union {
95 struct { 95 struct {
96#ifdef __LITTLE_ENDIAN /* Put desc_len at the LSB of x */ 96#ifdef __LITTLE_ENDIAN /* Put desc_len at the LSB of x */
97 u8 desc_len; 97 u16 desc_len;
98 char desc[sizeof(long) - 1]; /* First few chars of description */ 98 char desc[sizeof(long) - 2]; /* First few chars of description */
99#else 99#else
100 char desc[sizeof(long) - 1]; /* First few chars of description */ 100 char desc[sizeof(long) - 2]; /* First few chars of description */
101 u8 desc_len; 101 u16 desc_len;
102#endif 102#endif
103 }; 103 };
104 unsigned long x; 104 unsigned long x;
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index 5c5b5867024c..fcb46b3374c6 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -861,8 +861,9 @@ int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu);
861void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu); 861void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu);
862void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu); 862void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu);
863 863
864bool kvm_arch_has_vcpu_debugfs(void); 864#ifdef __KVM_HAVE_ARCH_VCPU_DEBUGFS
865int kvm_arch_create_vcpu_debugfs(struct kvm_vcpu *vcpu); 865void kvm_arch_create_vcpu_debugfs(struct kvm_vcpu *vcpu);
866#endif
866 867
867int kvm_arch_hardware_enable(void); 868int kvm_arch_hardware_enable(void);
868void kvm_arch_hardware_disable(void); 869void kvm_arch_hardware_disable(void);
@@ -872,6 +873,7 @@ int kvm_arch_check_processor_compat(void);
872int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu); 873int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu);
873bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu); 874bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu);
874int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu); 875int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu);
876bool kvm_arch_dy_runnable(struct kvm_vcpu *vcpu);
875 877
876#ifndef __KVM_HAVE_ARCH_VM_ALLOC 878#ifndef __KVM_HAVE_ARCH_VM_ALLOC
877/* 879/*
diff --git a/include/linux/logic_pio.h b/include/linux/logic_pio.h
index cbd9d8495690..88e1e6304a71 100644
--- a/include/linux/logic_pio.h
+++ b/include/linux/logic_pio.h
@@ -117,6 +117,7 @@ struct logic_pio_hwaddr *find_io_range_by_fwnode(struct fwnode_handle *fwnode);
117unsigned long logic_pio_trans_hwaddr(struct fwnode_handle *fwnode, 117unsigned long logic_pio_trans_hwaddr(struct fwnode_handle *fwnode,
118 resource_size_t hw_addr, resource_size_t size); 118 resource_size_t hw_addr, resource_size_t size);
119int logic_pio_register_range(struct logic_pio_hwaddr *newrange); 119int logic_pio_register_range(struct logic_pio_hwaddr *newrange);
120void logic_pio_unregister_range(struct logic_pio_hwaddr *range);
120resource_size_t logic_pio_to_hwaddr(unsigned long pio); 121resource_size_t logic_pio_to_hwaddr(unsigned long pio);
121unsigned long logic_pio_trans_cpuaddr(resource_size_t hw_addr); 122unsigned long logic_pio_trans_cpuaddr(resource_size_t hw_addr);
122 123
diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index 44c41462be33..2cd4359cb38c 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -668,6 +668,7 @@ static inline unsigned long lruvec_page_state_local(struct lruvec *lruvec,
668 668
669void __mod_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx, 669void __mod_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx,
670 int val); 670 int val);
671void __mod_lruvec_slab_state(void *p, enum node_stat_item idx, int val);
671 672
672static inline void mod_lruvec_state(struct lruvec *lruvec, 673static inline void mod_lruvec_state(struct lruvec *lruvec,
673 enum node_stat_item idx, int val) 674 enum node_stat_item idx, int val)
@@ -1072,6 +1073,14 @@ static inline void mod_lruvec_page_state(struct page *page,
1072 mod_node_page_state(page_pgdat(page), idx, val); 1073 mod_node_page_state(page_pgdat(page), idx, val);
1073} 1074}
1074 1075
1076static inline void __mod_lruvec_slab_state(void *p, enum node_stat_item idx,
1077 int val)
1078{
1079 struct page *page = virt_to_head_page(p);
1080
1081 __mod_node_page_state(page_pgdat(page), idx, val);
1082}
1083
1075static inline 1084static inline
1076unsigned long mem_cgroup_soft_limit_reclaim(pg_data_t *pgdat, int order, 1085unsigned long mem_cgroup_soft_limit_reclaim(pg_data_t *pgdat, int order,
1077 gfp_t gfp_mask, 1086 gfp_t gfp_mask,
@@ -1159,6 +1168,16 @@ static inline void __dec_lruvec_page_state(struct page *page,
1159 __mod_lruvec_page_state(page, idx, -1); 1168 __mod_lruvec_page_state(page, idx, -1);
1160} 1169}
1161 1170
1171static inline void __inc_lruvec_slab_state(void *p, enum node_stat_item idx)
1172{
1173 __mod_lruvec_slab_state(p, idx, 1);
1174}
1175
1176static inline void __dec_lruvec_slab_state(void *p, enum node_stat_item idx)
1177{
1178 __mod_lruvec_slab_state(p, idx, -1);
1179}
1180
1162/* idx can be of type enum memcg_stat_item or node_stat_item */ 1181/* idx can be of type enum memcg_stat_item or node_stat_item */
1163static inline void inc_memcg_state(struct mem_cgroup *memcg, 1182static inline void inc_memcg_state(struct mem_cgroup *memcg,
1164 int idx) 1183 int idx)
diff --git a/include/linux/mempolicy.h b/include/linux/mempolicy.h
index 5228c62af416..bac395f1d00a 100644
--- a/include/linux/mempolicy.h
+++ b/include/linux/mempolicy.h
@@ -139,6 +139,8 @@ struct mempolicy *mpol_shared_policy_lookup(struct shared_policy *sp,
139struct mempolicy *get_task_policy(struct task_struct *p); 139struct mempolicy *get_task_policy(struct task_struct *p);
140struct mempolicy *__get_vma_policy(struct vm_area_struct *vma, 140struct mempolicy *__get_vma_policy(struct vm_area_struct *vma,
141 unsigned long addr); 141 unsigned long addr);
142struct mempolicy *get_vma_policy(struct vm_area_struct *vma,
143 unsigned long addr);
142bool vma_policy_mof(struct vm_area_struct *vma); 144bool vma_policy_mof(struct vm_area_struct *vma);
143 145
144extern void numa_default_policy(void); 146extern void numa_default_policy(void);
diff --git a/include/linux/mlx5/device.h b/include/linux/mlx5/device.h
index ce9839c8bc1a..c2f056b5766d 100644
--- a/include/linux/mlx5/device.h
+++ b/include/linux/mlx5/device.h
@@ -446,11 +446,11 @@ enum {
446}; 446};
447 447
448enum { 448enum {
449 MLX5_OPC_MOD_TLS_TIS_STATIC_PARAMS = 0x20, 449 MLX5_OPC_MOD_TLS_TIS_STATIC_PARAMS = 0x1,
450}; 450};
451 451
452enum { 452enum {
453 MLX5_OPC_MOD_TLS_TIS_PROGRESS_PARAMS = 0x20, 453 MLX5_OPC_MOD_TLS_TIS_PROGRESS_PARAMS = 0x1,
454}; 454};
455 455
456enum { 456enum {
diff --git a/include/linux/mlx5/fs.h b/include/linux/mlx5/fs.h
index 04a569568eac..f049af3f3cd8 100644
--- a/include/linux/mlx5/fs.h
+++ b/include/linux/mlx5/fs.h
@@ -220,6 +220,7 @@ int mlx5_modify_rule_destination(struct mlx5_flow_handle *handler,
220 220
221struct mlx5_fc *mlx5_fc_create(struct mlx5_core_dev *dev, bool aging); 221struct mlx5_fc *mlx5_fc_create(struct mlx5_core_dev *dev, bool aging);
222void mlx5_fc_destroy(struct mlx5_core_dev *dev, struct mlx5_fc *counter); 222void mlx5_fc_destroy(struct mlx5_core_dev *dev, struct mlx5_fc *counter);
223u64 mlx5_fc_query_lastuse(struct mlx5_fc *counter);
223void mlx5_fc_query_cached(struct mlx5_fc *counter, 224void mlx5_fc_query_cached(struct mlx5_fc *counter,
224 u64 *bytes, u64 *packets, u64 *lastuse); 225 u64 *bytes, u64 *packets, u64 *lastuse);
225int mlx5_fc_query(struct mlx5_core_dev *dev, struct mlx5_fc *counter, 226int mlx5_fc_query(struct mlx5_core_dev *dev, struct mlx5_fc *counter,
diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h
index b3d5752657d9..b8b570c30b5e 100644
--- a/include/linux/mlx5/mlx5_ifc.h
+++ b/include/linux/mlx5/mlx5_ifc.h
@@ -5975,10 +5975,12 @@ struct mlx5_ifc_modify_cq_in_bits {
5975 5975
5976 struct mlx5_ifc_cqc_bits cq_context; 5976 struct mlx5_ifc_cqc_bits cq_context;
5977 5977
5978 u8 reserved_at_280[0x40]; 5978 u8 reserved_at_280[0x60];
5979 5979
5980 u8 cq_umem_valid[0x1]; 5980 u8 cq_umem_valid[0x1];
5981 u8 reserved_at_2c1[0x5bf]; 5981 u8 reserved_at_2e1[0x1f];
5982
5983 u8 reserved_at_300[0x580];
5982 5984
5983 u8 pas[0][0x40]; 5985 u8 pas[0][0x40];
5984}; 5986};
@@ -10052,9 +10054,8 @@ struct mlx5_ifc_tls_static_params_bits {
10052}; 10054};
10053 10055
10054struct mlx5_ifc_tls_progress_params_bits { 10056struct mlx5_ifc_tls_progress_params_bits {
10055 u8 valid[0x1]; 10057 u8 reserved_at_0[0x8];
10056 u8 reserved_at_1[0x7]; 10058 u8 tisn[0x18];
10057 u8 pd[0x18];
10058 10059
10059 u8 next_record_tcp_sn[0x20]; 10060 u8 next_record_tcp_sn[0x20];
10060 10061
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index 3a37a89eb7a7..6a7a1083b6fb 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -159,7 +159,16 @@ struct page {
159 /** @pgmap: Points to the hosting device page map. */ 159 /** @pgmap: Points to the hosting device page map. */
160 struct dev_pagemap *pgmap; 160 struct dev_pagemap *pgmap;
161 void *zone_device_data; 161 void *zone_device_data;
162 unsigned long _zd_pad_1; /* uses mapping */ 162 /*
163 * ZONE_DEVICE private pages are counted as being
164 * mapped so the next 3 words hold the mapping, index,
165 * and private fields from the source anonymous or
166 * page cache page while the page is migrated to device
167 * private memory.
168 * ZONE_DEVICE MEMORY_DEVICE_FS_DAX pages also
169 * use the mapping, index, and private fields when
170 * pmem backed DAX files are mapped.
171 */
163 }; 172 };
164 173
165 /** @rcu_head: You can use this to free a page by RCU. */ 174 /** @rcu_head: You can use this to free a page by RCU. */
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index d77d717c620c..3f38c30d2f13 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -215,8 +215,9 @@ enum node_stat_item {
215 NR_INACTIVE_FILE, /* " " " " " */ 215 NR_INACTIVE_FILE, /* " " " " " */
216 NR_ACTIVE_FILE, /* " " " " " */ 216 NR_ACTIVE_FILE, /* " " " " " */
217 NR_UNEVICTABLE, /* " " " " " */ 217 NR_UNEVICTABLE, /* " " " " " */
218 NR_SLAB_RECLAIMABLE, 218 NR_SLAB_RECLAIMABLE, /* Please do not reorder this item */
219 NR_SLAB_UNRECLAIMABLE, 219 NR_SLAB_UNRECLAIMABLE, /* and this one without looking at
220 * memcg_flush_percpu_vmstats() first. */
220 NR_ISOLATED_ANON, /* Temporary isolated pages from anon lru */ 221 NR_ISOLATED_ANON, /* Temporary isolated pages from anon lru */
221 NR_ISOLATED_FILE, /* Temporary isolated pages from file lru */ 222 NR_ISOLATED_FILE, /* Temporary isolated pages from file lru */
222 WORKINGSET_NODES, 223 WORKINGSET_NODES,
diff --git a/include/linux/mod_devicetable.h b/include/linux/mod_devicetable.h
index b2c1648f7e5d..5714fd35a83c 100644
--- a/include/linux/mod_devicetable.h
+++ b/include/linux/mod_devicetable.h
@@ -814,6 +814,7 @@ struct tee_client_device_id {
814/** 814/**
815 * struct wmi_device_id - WMI device identifier 815 * struct wmi_device_id - WMI device identifier
816 * @guid_string: 36 char string of the form fa50ff2b-f2e8-45de-83fa-65417f2f49ba 816 * @guid_string: 36 char string of the form fa50ff2b-f2e8-45de-83fa-65417f2f49ba
817 * @context: pointer to driver specific data
817 */ 818 */
818struct wmi_device_id { 819struct wmi_device_id {
819 const char guid_string[UUID_STRING_LEN+1]; 820 const char guid_string[UUID_STRING_LEN+1];
diff --git a/include/linux/netfilter/nf_conntrack_h323_asn1.h b/include/linux/netfilter/nf_conntrack_h323_asn1.h
index 91d6275292a5..19df78341fb3 100644
--- a/include/linux/netfilter/nf_conntrack_h323_asn1.h
+++ b/include/linux/netfilter/nf_conntrack_h323_asn1.h
@@ -1,7 +1,6 @@
1/* SPDX-License-Identifier: GPL-2.0-only */ 1/* SPDX-License-Identifier: GPL-2.0-only */
2/**************************************************************************** 2/****************************************************************************
3 * ip_conntrack_h323_asn1.h - BER and PER decoding library for H.323 3 * BER and PER decoding library for H.323 conntrack/NAT module.
4 * conntrack/NAT module.
5 * 4 *
6 * Copyright (c) 2006 by Jing Min Zhao <zhaojingmin@users.sourceforge.net> 5 * Copyright (c) 2006 by Jing Min Zhao <zhaojingmin@users.sourceforge.net>
7 * 6 *
diff --git a/include/linux/netfilter/nf_conntrack_h323_types.h b/include/linux/netfilter/nf_conntrack_h323_types.h
index 7a6871ac8784..74c6f9241944 100644
--- a/include/linux/netfilter/nf_conntrack_h323_types.h
+++ b/include/linux/netfilter/nf_conntrack_h323_types.h
@@ -4,6 +4,9 @@
4 * Copyright (c) 2006 Jing Min Zhao <zhaojingmin@users.sourceforge.net> 4 * Copyright (c) 2006 Jing Min Zhao <zhaojingmin@users.sourceforge.net>
5 */ 5 */
6 6
7#ifndef _NF_CONNTRACK_H323_TYPES_H
8#define _NF_CONNTRACK_H323_TYPES_H
9
7typedef struct TransportAddress_ipAddress { /* SEQUENCE */ 10typedef struct TransportAddress_ipAddress { /* SEQUENCE */
8 int options; /* No use */ 11 int options; /* No use */
9 unsigned int ip; 12 unsigned int ip;
@@ -931,3 +934,5 @@ typedef struct RasMessage { /* CHOICE */
931 InfoRequestResponse infoRequestResponse; 934 InfoRequestResponse infoRequestResponse;
932 }; 935 };
933} RasMessage; 936} RasMessage;
937
938#endif /* _NF_CONNTRACK_H323_TYPES_H */
diff --git a/include/linux/of.h b/include/linux/of.h
index 0cf857012f11..844f89e1b039 100644
--- a/include/linux/of.h
+++ b/include/linux/of.h
@@ -1164,7 +1164,7 @@ static inline int of_property_read_string_index(const struct device_node *np,
1164} 1164}
1165 1165
1166/** 1166/**
1167 * of_property_read_bool - Findfrom a property 1167 * of_property_read_bool - Find a property
1168 * @np: device node from which the property value is to be read. 1168 * @np: device node from which the property value is to be read.
1169 * @propname: name of the property to be searched. 1169 * @propname: name of the property to be searched.
1170 * 1170 *
diff --git a/include/linux/page-flags-layout.h b/include/linux/page-flags-layout.h
index 1dda31825ec4..71283739ffd2 100644
--- a/include/linux/page-flags-layout.h
+++ b/include/linux/page-flags-layout.h
@@ -32,6 +32,7 @@
32 32
33#endif /* CONFIG_SPARSEMEM */ 33#endif /* CONFIG_SPARSEMEM */
34 34
35#ifndef BUILD_VDSO32_64
35/* 36/*
36 * page->flags layout: 37 * page->flags layout:
37 * 38 *
@@ -76,20 +77,22 @@
76#define LAST_CPUPID_SHIFT 0 77#define LAST_CPUPID_SHIFT 0
77#endif 78#endif
78 79
79#if SECTIONS_WIDTH+ZONES_WIDTH+NODES_SHIFT+LAST_CPUPID_SHIFT <= BITS_PER_LONG - NR_PAGEFLAGS 80#ifdef CONFIG_KASAN_SW_TAGS
81#define KASAN_TAG_WIDTH 8
82#else
83#define KASAN_TAG_WIDTH 0
84#endif
85
86#if SECTIONS_WIDTH+ZONES_WIDTH+NODES_SHIFT+LAST_CPUPID_SHIFT+KASAN_TAG_WIDTH \
87 <= BITS_PER_LONG - NR_PAGEFLAGS
80#define LAST_CPUPID_WIDTH LAST_CPUPID_SHIFT 88#define LAST_CPUPID_WIDTH LAST_CPUPID_SHIFT
81#else 89#else
82#define LAST_CPUPID_WIDTH 0 90#define LAST_CPUPID_WIDTH 0
83#endif 91#endif
84 92
85#ifdef CONFIG_KASAN_SW_TAGS
86#define KASAN_TAG_WIDTH 8
87#if SECTIONS_WIDTH+NODES_WIDTH+ZONES_WIDTH+LAST_CPUPID_WIDTH+KASAN_TAG_WIDTH \ 93#if SECTIONS_WIDTH+NODES_WIDTH+ZONES_WIDTH+LAST_CPUPID_WIDTH+KASAN_TAG_WIDTH \
88 > BITS_PER_LONG - NR_PAGEFLAGS 94 > BITS_PER_LONG - NR_PAGEFLAGS
89#error "KASAN: not enough bits in page flags for tag" 95#error "Not enough bits in page flags"
90#endif
91#else
92#define KASAN_TAG_WIDTH 0
93#endif 96#endif
94 97
95/* 98/*
@@ -104,4 +107,5 @@
104#define LAST_CPUPID_NOT_IN_PAGE_FLAGS 107#define LAST_CPUPID_NOT_IN_PAGE_FLAGS
105#endif 108#endif
106 109
110#endif
107#endif /* _LINUX_PAGE_FLAGS_LAYOUT */ 111#endif /* _LINUX_PAGE_FLAGS_LAYOUT */
diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h
index b848517da64c..f91cb8898ff0 100644
--- a/include/linux/page-flags.h
+++ b/include/linux/page-flags.h
@@ -152,6 +152,8 @@ enum pageflags {
152 PG_savepinned = PG_dirty, 152 PG_savepinned = PG_dirty,
153 /* Has a grant mapping of another (foreign) domain's page. */ 153 /* Has a grant mapping of another (foreign) domain's page. */
154 PG_foreign = PG_owner_priv_1, 154 PG_foreign = PG_owner_priv_1,
155 /* Remapped by swiotlb-xen. */
156 PG_xen_remapped = PG_owner_priv_1,
155 157
156 /* SLOB */ 158 /* SLOB */
157 PG_slob_free = PG_private, 159 PG_slob_free = PG_private,
@@ -329,6 +331,8 @@ PAGEFLAG(Pinned, pinned, PF_NO_COMPOUND)
329 TESTSCFLAG(Pinned, pinned, PF_NO_COMPOUND) 331 TESTSCFLAG(Pinned, pinned, PF_NO_COMPOUND)
330PAGEFLAG(SavePinned, savepinned, PF_NO_COMPOUND); 332PAGEFLAG(SavePinned, savepinned, PF_NO_COMPOUND);
331PAGEFLAG(Foreign, foreign, PF_NO_COMPOUND); 333PAGEFLAG(Foreign, foreign, PF_NO_COMPOUND);
334PAGEFLAG(XenRemapped, xen_remapped, PF_NO_COMPOUND)
335 TESTCLEARFLAG(XenRemapped, xen_remapped, PF_NO_COMPOUND)
332 336
333PAGEFLAG(Reserved, reserved, PF_NO_COMPOUND) 337PAGEFLAG(Reserved, reserved, PF_NO_COMPOUND)
334 __CLEARPAGEFLAG(Reserved, reserved, PF_NO_COMPOUND) 338 __CLEARPAGEFLAG(Reserved, reserved, PF_NO_COMPOUND)
diff --git a/include/linux/pci.h b/include/linux/pci.h
index 9e700d9f9f28..82e4cd1b7ac3 100644
--- a/include/linux/pci.h
+++ b/include/linux/pci.h
@@ -1567,8 +1567,10 @@ extern bool pcie_ports_native;
1567 1567
1568#ifdef CONFIG_PCIEASPM 1568#ifdef CONFIG_PCIEASPM
1569bool pcie_aspm_support_enabled(void); 1569bool pcie_aspm_support_enabled(void);
1570bool pcie_aspm_enabled(struct pci_dev *pdev);
1570#else 1571#else
1571static inline bool pcie_aspm_support_enabled(void) { return false; } 1572static inline bool pcie_aspm_support_enabled(void) { return false; }
1573static inline bool pcie_aspm_enabled(struct pci_dev *pdev) { return false; }
1572#endif 1574#endif
1573 1575
1574#ifdef CONFIG_PCIEAER 1576#ifdef CONFIG_PCIEAER
diff --git a/include/linux/phy.h b/include/linux/phy.h
index 462b90b73f93..2fb9c8ffaf10 100644
--- a/include/linux/phy.h
+++ b/include/linux/phy.h
@@ -1107,6 +1107,7 @@ int genphy_c45_an_disable_aneg(struct phy_device *phydev);
1107int genphy_c45_read_mdix(struct phy_device *phydev); 1107int genphy_c45_read_mdix(struct phy_device *phydev);
1108int genphy_c45_pma_read_abilities(struct phy_device *phydev); 1108int genphy_c45_pma_read_abilities(struct phy_device *phydev);
1109int genphy_c45_read_status(struct phy_device *phydev); 1109int genphy_c45_read_status(struct phy_device *phydev);
1110int genphy_c45_config_aneg(struct phy_device *phydev);
1110 1111
1111/* The gen10g_* functions are the old Clause 45 stub */ 1112/* The gen10g_* functions are the old Clause 45 stub */
1112int gen10g_config_aneg(struct phy_device *phydev); 1113int gen10g_config_aneg(struct phy_device *phydev);
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 8dc1811487f5..9f51932bd543 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1092,7 +1092,15 @@ struct task_struct {
1092 u64 last_sum_exec_runtime; 1092 u64 last_sum_exec_runtime;
1093 struct callback_head numa_work; 1093 struct callback_head numa_work;
1094 1094
1095 struct numa_group *numa_group; 1095 /*
1096 * This pointer is only modified for current in syscall and
1097 * pagefault context (and for tasks being destroyed), so it can be read
1098 * from any of the following contexts:
1099 * - RCU read-side critical section
1100 * - current->numa_group from everywhere
1101 * - task's runqueue locked, task not running
1102 */
1103 struct numa_group __rcu *numa_group;
1096 1104
1097 /* 1105 /*
1098 * numa_faults is an array split into four regions: 1106 * numa_faults is an array split into four regions:
diff --git a/include/linux/sched/numa_balancing.h b/include/linux/sched/numa_balancing.h
index e7dd04a84ba8..3988762efe15 100644
--- a/include/linux/sched/numa_balancing.h
+++ b/include/linux/sched/numa_balancing.h
@@ -19,7 +19,7 @@
19extern void task_numa_fault(int last_node, int node, int pages, int flags); 19extern void task_numa_fault(int last_node, int node, int pages, int flags);
20extern pid_t task_numa_group_id(struct task_struct *p); 20extern pid_t task_numa_group_id(struct task_struct *p);
21extern void set_numabalancing_state(bool enabled); 21extern void set_numabalancing_state(bool enabled);
22extern void task_numa_free(struct task_struct *p); 22extern void task_numa_free(struct task_struct *p, bool final);
23extern bool should_numa_migrate_memory(struct task_struct *p, struct page *page, 23extern bool should_numa_migrate_memory(struct task_struct *p, struct page *page,
24 int src_nid, int dst_cpu); 24 int src_nid, int dst_cpu);
25#else 25#else
@@ -34,7 +34,7 @@ static inline pid_t task_numa_group_id(struct task_struct *p)
34static inline void set_numabalancing_state(bool enabled) 34static inline void set_numabalancing_state(bool enabled)
35{ 35{
36} 36}
37static inline void task_numa_free(struct task_struct *p) 37static inline void task_numa_free(struct task_struct *p, bool final)
38{ 38{
39} 39}
40static inline bool should_numa_migrate_memory(struct task_struct *p, 40static inline bool should_numa_migrate_memory(struct task_struct *p,
diff --git a/include/linux/signal.h b/include/linux/signal.h
index b5d99482d3fe..1a5f88316b08 100644
--- a/include/linux/signal.h
+++ b/include/linux/signal.h
@@ -282,6 +282,9 @@ extern void signal_setup_done(int failed, struct ksignal *ksig, int stepping);
282extern void exit_signals(struct task_struct *tsk); 282extern void exit_signals(struct task_struct *tsk);
283extern void kernel_sigaction(int, __sighandler_t); 283extern void kernel_sigaction(int, __sighandler_t);
284 284
285#define SIG_KTHREAD ((__force __sighandler_t)2)
286#define SIG_KTHREAD_KERNEL ((__force __sighandler_t)3)
287
285static inline void allow_signal(int sig) 288static inline void allow_signal(int sig)
286{ 289{
287 /* 290 /*
@@ -289,7 +292,17 @@ static inline void allow_signal(int sig)
289 * know it'll be handled, so that they don't get converted to 292 * know it'll be handled, so that they don't get converted to
290 * SIGKILL or just silently dropped. 293 * SIGKILL or just silently dropped.
291 */ 294 */
292 kernel_sigaction(sig, (__force __sighandler_t)2); 295 kernel_sigaction(sig, SIG_KTHREAD);
296}
297
298static inline void allow_kernel_signal(int sig)
299{
300 /*
301 * Kernel threads handle their own signals. Let the signal code
302 * know signals sent by the kernel will be handled, so that they
303 * don't get silently dropped.
304 */
305 kernel_sigaction(sig, SIG_KTHREAD_KERNEL);
293} 306}
294 307
295static inline void disallow_signal(int sig) 308static inline void disallow_signal(int sig)
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index d8af86d995d6..ba5583522d24 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -1374,6 +1374,14 @@ static inline void skb_copy_hash(struct sk_buff *to, const struct sk_buff *from)
1374 to->l4_hash = from->l4_hash; 1374 to->l4_hash = from->l4_hash;
1375}; 1375};
1376 1376
1377static inline void skb_copy_decrypted(struct sk_buff *to,
1378 const struct sk_buff *from)
1379{
1380#ifdef CONFIG_TLS_DEVICE
1381 to->decrypted = from->decrypted;
1382#endif
1383}
1384
1377#ifdef NET_SKBUFF_DATA_USES_OFFSET 1385#ifdef NET_SKBUFF_DATA_USES_OFFSET
1378static inline unsigned char *skb_end_pointer(const struct sk_buff *skb) 1386static inline unsigned char *skb_end_pointer(const struct sk_buff *skb)
1379{ 1387{
diff --git a/include/linux/skmsg.h b/include/linux/skmsg.h
index 50ced8aba9db..e4b3fb4bb77c 100644
--- a/include/linux/skmsg.h
+++ b/include/linux/skmsg.h
@@ -354,7 +354,13 @@ static inline void sk_psock_restore_proto(struct sock *sk,
354 sk->sk_write_space = psock->saved_write_space; 354 sk->sk_write_space = psock->saved_write_space;
355 355
356 if (psock->sk_proto) { 356 if (psock->sk_proto) {
357 sk->sk_prot = psock->sk_proto; 357 struct inet_connection_sock *icsk = inet_csk(sk);
358 bool has_ulp = !!icsk->icsk_ulp_data;
359
360 if (has_ulp)
361 tcp_update_ulp(sk, psock->sk_proto);
362 else
363 sk->sk_prot = psock->sk_proto;
358 psock->sk_proto = NULL; 364 psock->sk_proto = NULL;
359 } 365 }
360} 366}
diff --git a/include/linux/socket.h b/include/linux/socket.h
index 97523818cb14..fc0bed59fc84 100644
--- a/include/linux/socket.h
+++ b/include/linux/socket.h
@@ -292,6 +292,9 @@ struct ucred {
292#define MSG_BATCH 0x40000 /* sendmmsg(): more messages coming */ 292#define MSG_BATCH 0x40000 /* sendmmsg(): more messages coming */
293#define MSG_EOF MSG_FIN 293#define MSG_EOF MSG_FIN
294#define MSG_NO_SHARED_FRAGS 0x80000 /* sendpage() internal : page frags are not shared */ 294#define MSG_NO_SHARED_FRAGS 0x80000 /* sendpage() internal : page frags are not shared */
295#define MSG_SENDPAGE_DECRYPTED 0x100000 /* sendpage() internal : page may carry
296 * plain text and require encryption
297 */
295 298
296#define MSG_ZEROCOPY 0x4000000 /* Use user data in kernel path */ 299#define MSG_ZEROCOPY 0x4000000 /* Use user data in kernel path */
297#define MSG_FASTOPEN 0x20000000 /* Send data in TCP SYN */ 300#define MSG_FASTOPEN 0x20000000 /* Send data in TCP SYN */
diff --git a/include/linux/sunrpc/sched.h b/include/linux/sunrpc/sched.h
index baa3ecdb882f..27536b961552 100644
--- a/include/linux/sunrpc/sched.h
+++ b/include/linux/sunrpc/sched.h
@@ -98,7 +98,6 @@ typedef void (*rpc_action)(struct rpc_task *);
98 98
99struct rpc_call_ops { 99struct rpc_call_ops {
100 void (*rpc_call_prepare)(struct rpc_task *, void *); 100 void (*rpc_call_prepare)(struct rpc_task *, void *);
101 void (*rpc_call_prepare_transmit)(struct rpc_task *, void *);
102 void (*rpc_call_done)(struct rpc_task *, void *); 101 void (*rpc_call_done)(struct rpc_task *, void *);
103 void (*rpc_count_stats)(struct rpc_task *, void *); 102 void (*rpc_count_stats)(struct rpc_task *, void *);
104 void (*rpc_release)(void *); 103 void (*rpc_release)(void *);
diff --git a/include/linux/timekeeper_internal.h b/include/linux/timekeeper_internal.h
index 7acb953298a7..84ff2844df2a 100644
--- a/include/linux/timekeeper_internal.h
+++ b/include/linux/timekeeper_internal.h
@@ -57,6 +57,7 @@ struct tk_read_base {
57 * @cs_was_changed_seq: The sequence number of clocksource change events 57 * @cs_was_changed_seq: The sequence number of clocksource change events
58 * @next_leap_ktime: CLOCK_MONOTONIC time value of a pending leap-second 58 * @next_leap_ktime: CLOCK_MONOTONIC time value of a pending leap-second
59 * @raw_sec: CLOCK_MONOTONIC_RAW time in seconds 59 * @raw_sec: CLOCK_MONOTONIC_RAW time in seconds
60 * @monotonic_to_boot: CLOCK_MONOTONIC to CLOCK_BOOTTIME offset
60 * @cycle_interval: Number of clock cycles in one NTP interval 61 * @cycle_interval: Number of clock cycles in one NTP interval
61 * @xtime_interval: Number of clock shifted nano seconds in one NTP 62 * @xtime_interval: Number of clock shifted nano seconds in one NTP
62 * interval. 63 * interval.
@@ -84,6 +85,9 @@ struct tk_read_base {
84 * 85 *
85 * wall_to_monotonic is no longer the boot time, getboottime must be 86 * wall_to_monotonic is no longer the boot time, getboottime must be
86 * used instead. 87 * used instead.
88 *
89 * @monotonic_to_boottime is a timespec64 representation of @offs_boot to
90 * accelerate the VDSO update for CLOCK_BOOTTIME.
87 */ 91 */
88struct timekeeper { 92struct timekeeper {
89 struct tk_read_base tkr_mono; 93 struct tk_read_base tkr_mono;
@@ -99,6 +103,7 @@ struct timekeeper {
99 u8 cs_was_changed_seq; 103 u8 cs_was_changed_seq;
100 ktime_t next_leap_ktime; 104 ktime_t next_leap_ktime;
101 u64 raw_sec; 105 u64 raw_sec;
106 struct timespec64 monotonic_to_boot;
102 107
103 /* The following members are for timekeeping internal use */ 108 /* The following members are for timekeeping internal use */
104 u64 cycle_interval; 109 u64 cycle_interval;
diff --git a/include/linux/trace_events.h b/include/linux/trace_events.h
index 5150436783e8..30a8cdcfd4a4 100644
--- a/include/linux/trace_events.h
+++ b/include/linux/trace_events.h
@@ -548,6 +548,7 @@ extern int trace_event_get_offsets(struct trace_event_call *call);
548 548
549#define is_signed_type(type) (((type)(-1)) < (type)1) 549#define is_signed_type(type) (((type)(-1)) < (type)1)
550 550
551int ftrace_set_clr_event(struct trace_array *tr, char *buf, int set);
551int trace_set_clr_event(const char *system, const char *event, int set); 552int trace_set_clr_event(const char *system, const char *event, int set);
552 553
553/* 554/*
diff --git a/include/linux/usb.h b/include/linux/usb.h
index 83d35d993e8c..e87826e23d59 100644
--- a/include/linux/usb.h
+++ b/include/linux/usb.h
@@ -1457,7 +1457,7 @@ typedef void (*usb_complete_t)(struct urb *);
1457 * field rather than determining a dma address themselves. 1457 * field rather than determining a dma address themselves.
1458 * 1458 *
1459 * Note that transfer_buffer must still be set if the controller 1459 * Note that transfer_buffer must still be set if the controller
1460 * does not support DMA (as indicated by bus.uses_dma) and when talking 1460 * does not support DMA (as indicated by hcd_uses_dma()) and when talking
1461 * to root hub. If you have to trasfer between highmem zone and the device 1461 * to root hub. If you have to trasfer between highmem zone and the device
1462 * on such controller, create a bounce buffer or bail out with an error. 1462 * on such controller, create a bounce buffer or bail out with an error.
1463 * If transfer_buffer cannot be set (is in highmem) and the controller is DMA 1463 * If transfer_buffer cannot be set (is in highmem) and the controller is DMA
diff --git a/include/linux/usb/hcd.h b/include/linux/usb/hcd.h
index bab27ccc8ff5..a20e7815d814 100644
--- a/include/linux/usb/hcd.h
+++ b/include/linux/usb/hcd.h
@@ -422,6 +422,9 @@ static inline bool hcd_periodic_completion_in_progress(struct usb_hcd *hcd,
422 return hcd->high_prio_bh.completing_ep == ep; 422 return hcd->high_prio_bh.completing_ep == ep;
423} 423}
424 424
425#define hcd_uses_dma(hcd) \
426 (IS_ENABLED(CONFIG_HAS_DMA) && (hcd)->self.uses_dma)
427
425extern int usb_hcd_link_urb_to_ep(struct usb_hcd *hcd, struct urb *urb); 428extern int usb_hcd_link_urb_to_ep(struct usb_hcd *hcd, struct urb *urb);
426extern int usb_hcd_check_unlink_urb(struct usb_hcd *hcd, struct urb *urb, 429extern int usb_hcd_check_unlink_urb(struct usb_hcd *hcd, struct urb *urb,
427 int status); 430 int status);
diff --git a/include/linux/wait.h b/include/linux/wait.h
index b6f77cf60dd7..30c515520fb2 100644
--- a/include/linux/wait.h
+++ b/include/linux/wait.h
@@ -127,6 +127,19 @@ static inline int waitqueue_active(struct wait_queue_head *wq_head)
127} 127}
128 128
129/** 129/**
130 * wq_has_single_sleeper - check if there is only one sleeper
131 * @wq_head: wait queue head
132 *
133 * Returns true of wq_head has only one sleeper on the list.
134 *
135 * Please refer to the comment for waitqueue_active.
136 */
137static inline bool wq_has_single_sleeper(struct wait_queue_head *wq_head)
138{
139 return list_is_singular(&wq_head->head);
140}
141
142/**
130 * wq_has_sleeper - check if there are any waiting processes 143 * wq_has_sleeper - check if there are any waiting processes
131 * @wq_head: wait queue head 144 * @wq_head: wait queue head
132 * 145 *
diff --git a/include/math-emu/op-common.h b/include/math-emu/op-common.h
index f37d12877754..adcc6a97db61 100644
--- a/include/math-emu/op-common.h
+++ b/include/math-emu/op-common.h
@@ -308,6 +308,7 @@ do { \
308 \ 308 \
309 case _FP_CLS_COMBINE(FP_CLS_NORMAL,FP_CLS_ZERO): \ 309 case _FP_CLS_COMBINE(FP_CLS_NORMAL,FP_CLS_ZERO): \
310 R##_e = X##_e; \ 310 R##_e = X##_e; \
311 /* Fall through */ \
311 case _FP_CLS_COMBINE(FP_CLS_NAN,FP_CLS_NORMAL): \ 312 case _FP_CLS_COMBINE(FP_CLS_NAN,FP_CLS_NORMAL): \
312 case _FP_CLS_COMBINE(FP_CLS_NAN,FP_CLS_INF): \ 313 case _FP_CLS_COMBINE(FP_CLS_NAN,FP_CLS_INF): \
313 case _FP_CLS_COMBINE(FP_CLS_NAN,FP_CLS_ZERO): \ 314 case _FP_CLS_COMBINE(FP_CLS_NAN,FP_CLS_ZERO): \
@@ -318,6 +319,7 @@ do { \
318 \ 319 \
319 case _FP_CLS_COMBINE(FP_CLS_ZERO,FP_CLS_NORMAL): \ 320 case _FP_CLS_COMBINE(FP_CLS_ZERO,FP_CLS_NORMAL): \
320 R##_e = Y##_e; \ 321 R##_e = Y##_e; \
322 /* Fall through */ \
321 case _FP_CLS_COMBINE(FP_CLS_NORMAL,FP_CLS_NAN): \ 323 case _FP_CLS_COMBINE(FP_CLS_NORMAL,FP_CLS_NAN): \
322 case _FP_CLS_COMBINE(FP_CLS_INF,FP_CLS_NAN): \ 324 case _FP_CLS_COMBINE(FP_CLS_INF,FP_CLS_NAN): \
323 case _FP_CLS_COMBINE(FP_CLS_ZERO,FP_CLS_NAN): \ 325 case _FP_CLS_COMBINE(FP_CLS_ZERO,FP_CLS_NAN): \
@@ -415,6 +417,7 @@ do { \
415 case _FP_CLS_COMBINE(FP_CLS_NAN,FP_CLS_INF): \ 417 case _FP_CLS_COMBINE(FP_CLS_NAN,FP_CLS_INF): \
416 case _FP_CLS_COMBINE(FP_CLS_NAN,FP_CLS_ZERO): \ 418 case _FP_CLS_COMBINE(FP_CLS_NAN,FP_CLS_ZERO): \
417 R##_s = X##_s; \ 419 R##_s = X##_s; \
420 /* Fall through */ \
418 \ 421 \
419 case _FP_CLS_COMBINE(FP_CLS_INF,FP_CLS_INF): \ 422 case _FP_CLS_COMBINE(FP_CLS_INF,FP_CLS_INF): \
420 case _FP_CLS_COMBINE(FP_CLS_INF,FP_CLS_NORMAL): \ 423 case _FP_CLS_COMBINE(FP_CLS_INF,FP_CLS_NORMAL): \
@@ -428,6 +431,7 @@ do { \
428 case _FP_CLS_COMBINE(FP_CLS_INF,FP_CLS_NAN): \ 431 case _FP_CLS_COMBINE(FP_CLS_INF,FP_CLS_NAN): \
429 case _FP_CLS_COMBINE(FP_CLS_ZERO,FP_CLS_NAN): \ 432 case _FP_CLS_COMBINE(FP_CLS_ZERO,FP_CLS_NAN): \
430 R##_s = Y##_s; \ 433 R##_s = Y##_s; \
434 /* Fall through */ \
431 \ 435 \
432 case _FP_CLS_COMBINE(FP_CLS_NORMAL,FP_CLS_INF): \ 436 case _FP_CLS_COMBINE(FP_CLS_NORMAL,FP_CLS_INF): \
433 case _FP_CLS_COMBINE(FP_CLS_NORMAL,FP_CLS_ZERO): \ 437 case _FP_CLS_COMBINE(FP_CLS_NORMAL,FP_CLS_ZERO): \
@@ -493,6 +497,7 @@ do { \
493 \ 497 \
494 case _FP_CLS_COMBINE(FP_CLS_NORMAL,FP_CLS_ZERO): \ 498 case _FP_CLS_COMBINE(FP_CLS_NORMAL,FP_CLS_ZERO): \
495 FP_SET_EXCEPTION(FP_EX_DIVZERO); \ 499 FP_SET_EXCEPTION(FP_EX_DIVZERO); \
500 /* Fall through */ \
496 case _FP_CLS_COMBINE(FP_CLS_INF,FP_CLS_ZERO): \ 501 case _FP_CLS_COMBINE(FP_CLS_INF,FP_CLS_ZERO): \
497 case _FP_CLS_COMBINE(FP_CLS_INF,FP_CLS_NORMAL): \ 502 case _FP_CLS_COMBINE(FP_CLS_INF,FP_CLS_NORMAL): \
498 R##_c = FP_CLS_INF; \ 503 R##_c = FP_CLS_INF; \
diff --git a/include/net/act_api.h b/include/net/act_api.h
index c61a1bf4e3de..3a1a72990fce 100644
--- a/include/net/act_api.h
+++ b/include/net/act_api.h
@@ -15,6 +15,7 @@
15struct tcf_idrinfo { 15struct tcf_idrinfo {
16 struct mutex lock; 16 struct mutex lock;
17 struct idr action_idr; 17 struct idr action_idr;
18 struct net *net;
18}; 19};
19 20
20struct tc_action_ops; 21struct tc_action_ops;
@@ -108,7 +109,7 @@ struct tc_action_net {
108}; 109};
109 110
110static inline 111static inline
111int tc_action_net_init(struct tc_action_net *tn, 112int tc_action_net_init(struct net *net, struct tc_action_net *tn,
112 const struct tc_action_ops *ops) 113 const struct tc_action_ops *ops)
113{ 114{
114 int err = 0; 115 int err = 0;
@@ -117,6 +118,7 @@ int tc_action_net_init(struct tc_action_net *tn,
117 if (!tn->idrinfo) 118 if (!tn->idrinfo)
118 return -ENOMEM; 119 return -ENOMEM;
119 tn->ops = ops; 120 tn->ops = ops;
121 tn->idrinfo->net = net;
120 mutex_init(&tn->idrinfo->lock); 122 mutex_init(&tn->idrinfo->lock);
121 idr_init(&tn->idrinfo->action_idr); 123 idr_init(&tn->idrinfo->action_idr);
122 return err; 124 return err;
diff --git a/include/net/addrconf.h b/include/net/addrconf.h
index becdad576859..3f62b347b04a 100644
--- a/include/net/addrconf.h
+++ b/include/net/addrconf.h
@@ -206,7 +206,7 @@ static inline int ipv6_mc_may_pull(struct sk_buff *skb,
206 unsigned int len) 206 unsigned int len)
207{ 207{
208 if (skb_transport_offset(skb) + ipv6_transport_len(skb) < len) 208 if (skb_transport_offset(skb) + ipv6_transport_len(skb) < len)
209 return -EINVAL; 209 return 0;
210 210
211 return pskb_may_pull(skb, len); 211 return pskb_may_pull(skb, len);
212} 212}
diff --git a/include/net/bluetooth/hci_core.h b/include/net/bluetooth/hci_core.h
index ded574b32c20..ffc95b382eb5 100644
--- a/include/net/bluetooth/hci_core.h
+++ b/include/net/bluetooth/hci_core.h
@@ -278,6 +278,7 @@ struct hci_dev {
278 __u16 conn_info_min_age; 278 __u16 conn_info_min_age;
279 __u16 conn_info_max_age; 279 __u16 conn_info_max_age;
280 __u16 auth_payload_timeout; 280 __u16 auth_payload_timeout;
281 __u8 min_enc_key_size;
281 __u8 ssp_debug_mode; 282 __u8 ssp_debug_mode;
282 __u8 hw_error_code; 283 __u8 hw_error_code;
283 __u32 clock; 284 __u32 clock;
diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h
index 88c27153a4bc..26e2ad2c7027 100644
--- a/include/net/cfg80211.h
+++ b/include/net/cfg80211.h
@@ -4170,7 +4170,7 @@ struct sta_opmode_info {
4170 u8 rx_nss; 4170 u8 rx_nss;
4171}; 4171};
4172 4172
4173#define VENDOR_CMD_RAW_DATA ((const struct nla_policy *)ERR_PTR(-ENODATA)) 4173#define VENDOR_CMD_RAW_DATA ((const struct nla_policy *)(long)(-ENODATA))
4174 4174
4175/** 4175/**
4176 * struct wiphy_vendor_command - vendor command definition 4176 * struct wiphy_vendor_command - vendor command definition
@@ -7320,6 +7320,21 @@ void cfg80211_pmsr_complete(struct wireless_dev *wdev,
7320 struct cfg80211_pmsr_request *req, 7320 struct cfg80211_pmsr_request *req,
7321 gfp_t gfp); 7321 gfp_t gfp);
7322 7322
7323/**
7324 * cfg80211_iftype_allowed - check whether the interface can be allowed
7325 * @wiphy: the wiphy
7326 * @iftype: interface type
7327 * @is_4addr: use_4addr flag, must be '0' when check_swif is '1'
7328 * @check_swif: check iftype against software interfaces
7329 *
7330 * Check whether the interface is allowed to operate; additionally, this API
7331 * can be used to check iftype against the software interfaces when
7332 * check_swif is '1'.
7333 */
7334bool cfg80211_iftype_allowed(struct wiphy *wiphy, enum nl80211_iftype iftype,
7335 bool is_4addr, u8 check_swif);
7336
7337
7323/* Logging, debugging and troubleshooting/diagnostic helpers. */ 7338/* Logging, debugging and troubleshooting/diagnostic helpers. */
7324 7339
7325/* wiphy_printk helpers, similar to dev_printk */ 7340/* wiphy_printk helpers, similar to dev_printk */
diff --git a/include/net/flow_offload.h b/include/net/flow_offload.h
index db337299e81e..b16d21636d69 100644
--- a/include/net/flow_offload.h
+++ b/include/net/flow_offload.h
@@ -2,8 +2,8 @@
2#define _NET_FLOW_OFFLOAD_H 2#define _NET_FLOW_OFFLOAD_H
3 3
4#include <linux/kernel.h> 4#include <linux/kernel.h>
5#include <linux/list.h>
5#include <net/flow_dissector.h> 6#include <net/flow_dissector.h>
6#include <net/sch_generic.h>
7 7
8struct flow_match { 8struct flow_match {
9 struct flow_dissector *dissector; 9 struct flow_dissector *dissector;
@@ -249,6 +249,10 @@ enum flow_block_binder_type {
249 FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS, 249 FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS,
250}; 250};
251 251
252struct flow_block {
253 struct list_head cb_list;
254};
255
252struct netlink_ext_ack; 256struct netlink_ext_ack;
253 257
254struct flow_block_offload { 258struct flow_block_offload {
@@ -256,29 +260,33 @@ struct flow_block_offload {
256 enum flow_block_binder_type binder_type; 260 enum flow_block_binder_type binder_type;
257 bool block_shared; 261 bool block_shared;
258 struct net *net; 262 struct net *net;
263 struct flow_block *block;
259 struct list_head cb_list; 264 struct list_head cb_list;
260 struct list_head *driver_block_list; 265 struct list_head *driver_block_list;
261 struct netlink_ext_ack *extack; 266 struct netlink_ext_ack *extack;
262}; 267};
263 268
269enum tc_setup_type;
270typedef int flow_setup_cb_t(enum tc_setup_type type, void *type_data,
271 void *cb_priv);
272
264struct flow_block_cb { 273struct flow_block_cb {
265 struct list_head driver_list; 274 struct list_head driver_list;
266 struct list_head list; 275 struct list_head list;
267 struct net *net; 276 flow_setup_cb_t *cb;
268 tc_setup_cb_t *cb;
269 void *cb_ident; 277 void *cb_ident;
270 void *cb_priv; 278 void *cb_priv;
271 void (*release)(void *cb_priv); 279 void (*release)(void *cb_priv);
272 unsigned int refcnt; 280 unsigned int refcnt;
273}; 281};
274 282
275struct flow_block_cb *flow_block_cb_alloc(struct net *net, tc_setup_cb_t *cb, 283struct flow_block_cb *flow_block_cb_alloc(flow_setup_cb_t *cb,
276 void *cb_ident, void *cb_priv, 284 void *cb_ident, void *cb_priv,
277 void (*release)(void *cb_priv)); 285 void (*release)(void *cb_priv));
278void flow_block_cb_free(struct flow_block_cb *block_cb); 286void flow_block_cb_free(struct flow_block_cb *block_cb);
279 287
280struct flow_block_cb *flow_block_cb_lookup(struct flow_block_offload *offload, 288struct flow_block_cb *flow_block_cb_lookup(struct flow_block *block,
281 tc_setup_cb_t *cb, void *cb_ident); 289 flow_setup_cb_t *cb, void *cb_ident);
282 290
283void *flow_block_cb_priv(struct flow_block_cb *block_cb); 291void *flow_block_cb_priv(struct flow_block_cb *block_cb);
284void flow_block_cb_incref(struct flow_block_cb *block_cb); 292void flow_block_cb_incref(struct flow_block_cb *block_cb);
@@ -296,11 +304,12 @@ static inline void flow_block_cb_remove(struct flow_block_cb *block_cb,
296 list_move(&block_cb->list, &offload->cb_list); 304 list_move(&block_cb->list, &offload->cb_list);
297} 305}
298 306
299bool flow_block_cb_is_busy(tc_setup_cb_t *cb, void *cb_ident, 307bool flow_block_cb_is_busy(flow_setup_cb_t *cb, void *cb_ident,
300 struct list_head *driver_block_list); 308 struct list_head *driver_block_list);
301 309
302int flow_block_cb_setup_simple(struct flow_block_offload *f, 310int flow_block_cb_setup_simple(struct flow_block_offload *f,
303 struct list_head *driver_list, tc_setup_cb_t *cb, 311 struct list_head *driver_list,
312 flow_setup_cb_t *cb,
304 void *cb_ident, void *cb_priv, bool ingress_only); 313 void *cb_ident, void *cb_priv, bool ingress_only);
305 314
306enum flow_cls_command { 315enum flow_cls_command {
@@ -333,4 +342,9 @@ flow_cls_offload_flow_rule(struct flow_cls_offload *flow_cmd)
333 return flow_cmd->rule; 342 return flow_cmd->rule;
334} 343}
335 344
345static inline void flow_block_init(struct flow_block *flow_block)
346{
347 INIT_LIST_HEAD(&flow_block->cb_list);
348}
349
336#endif /* _NET_FLOW_OFFLOAD_H */ 350#endif /* _NET_FLOW_OFFLOAD_H */
diff --git a/include/net/inet_frag.h b/include/net/inet_frag.h
index 010f26b31c89..bac79e817776 100644
--- a/include/net/inet_frag.h
+++ b/include/net/inet_frag.h
@@ -171,7 +171,7 @@ int inet_frag_queue_insert(struct inet_frag_queue *q, struct sk_buff *skb,
171void *inet_frag_reasm_prepare(struct inet_frag_queue *q, struct sk_buff *skb, 171void *inet_frag_reasm_prepare(struct inet_frag_queue *q, struct sk_buff *skb,
172 struct sk_buff *parent); 172 struct sk_buff *parent);
173void inet_frag_reasm_finish(struct inet_frag_queue *q, struct sk_buff *head, 173void inet_frag_reasm_finish(struct inet_frag_queue *q, struct sk_buff *head,
174 void *reasm_data); 174 void *reasm_data, bool try_coalesce);
175struct sk_buff *inet_frag_pull_head(struct inet_frag_queue *q); 175struct sk_buff *inet_frag_pull_head(struct inet_frag_queue *q);
176 176
177#endif 177#endif
diff --git a/include/net/net_namespace.h b/include/net/net_namespace.h
index 4a9da951a794..ab40d7afdc54 100644
--- a/include/net/net_namespace.h
+++ b/include/net/net_namespace.h
@@ -52,7 +52,7 @@ struct bpf_prog;
52#define NETDEV_HASHENTRIES (1 << NETDEV_HASHBITS) 52#define NETDEV_HASHENTRIES (1 << NETDEV_HASHBITS)
53 53
54struct net { 54struct net {
55 refcount_t passive; /* To decided when the network 55 refcount_t passive; /* To decide when the network
56 * namespace should be freed. 56 * namespace should be freed.
57 */ 57 */
58 refcount_t count; /* To decided when the network 58 refcount_t count; /* To decided when the network
@@ -61,7 +61,6 @@ struct net {
61 spinlock_t rules_mod_lock; 61 spinlock_t rules_mod_lock;
62 62
63 u32 hash_mix; 63 u32 hash_mix;
64 atomic64_t cookie_gen;
65 64
66 struct list_head list; /* list of network namespaces */ 65 struct list_head list; /* list of network namespaces */
67 struct list_head exit_list; /* To linked to call pernet exit 66 struct list_head exit_list; /* To linked to call pernet exit
diff --git a/include/net/netfilter/nf_conntrack_expect.h b/include/net/netfilter/nf_conntrack_expect.h
index 93ce6b0daaba..573429be4d59 100644
--- a/include/net/netfilter/nf_conntrack_expect.h
+++ b/include/net/netfilter/nf_conntrack_expect.h
@@ -76,6 +76,11 @@ struct nf_conntrack_expect_policy {
76#define NF_CT_EXPECT_CLASS_DEFAULT 0 76#define NF_CT_EXPECT_CLASS_DEFAULT 0
77#define NF_CT_EXPECT_MAX_CNT 255 77#define NF_CT_EXPECT_MAX_CNT 255
78 78
79/* Allow to reuse expectations with the same tuples from different master
80 * conntracks.
81 */
82#define NF_CT_EXP_F_SKIP_MASTER 0x1
83
79int nf_conntrack_expect_pernet_init(struct net *net); 84int nf_conntrack_expect_pernet_init(struct net *net);
80void nf_conntrack_expect_pernet_fini(struct net *net); 85void nf_conntrack_expect_pernet_fini(struct net *net);
81 86
@@ -122,10 +127,11 @@ void nf_ct_expect_init(struct nf_conntrack_expect *, unsigned int, u_int8_t,
122 u_int8_t, const __be16 *, const __be16 *); 127 u_int8_t, const __be16 *, const __be16 *);
123void nf_ct_expect_put(struct nf_conntrack_expect *exp); 128void nf_ct_expect_put(struct nf_conntrack_expect *exp);
124int nf_ct_expect_related_report(struct nf_conntrack_expect *expect, 129int nf_ct_expect_related_report(struct nf_conntrack_expect *expect,
125 u32 portid, int report); 130 u32 portid, int report, unsigned int flags);
126static inline int nf_ct_expect_related(struct nf_conntrack_expect *expect) 131static inline int nf_ct_expect_related(struct nf_conntrack_expect *expect,
132 unsigned int flags)
127{ 133{
128 return nf_ct_expect_related_report(expect, 0, 0); 134 return nf_ct_expect_related_report(expect, 0, 0, flags);
129} 135}
130 136
131#endif /*_NF_CONNTRACK_EXPECT_H*/ 137#endif /*_NF_CONNTRACK_EXPECT_H*/
diff --git a/include/net/netfilter/nf_conntrack_synproxy.h b/include/net/netfilter/nf_conntrack_synproxy.h
index 8f00125b06f4..44513b93bd55 100644
--- a/include/net/netfilter/nf_conntrack_synproxy.h
+++ b/include/net/netfilter/nf_conntrack_synproxy.h
@@ -68,6 +68,7 @@ struct synproxy_options {
68 u8 options; 68 u8 options;
69 u8 wscale; 69 u8 wscale;
70 u16 mss; 70 u16 mss;
71 u16 mss_encode;
71 u32 tsval; 72 u32 tsval;
72 u32 tsecr; 73 u32 tsecr;
73}; 74};
diff --git a/include/net/netfilter/nf_tables.h b/include/net/netfilter/nf_tables.h
index 35dfdd9f69b3..475d6f28ca67 100644
--- a/include/net/netfilter/nf_tables.h
+++ b/include/net/netfilter/nf_tables.h
@@ -11,6 +11,7 @@
11#include <linux/rhashtable.h> 11#include <linux/rhashtable.h>
12#include <net/netfilter/nf_flow_table.h> 12#include <net/netfilter/nf_flow_table.h>
13#include <net/netlink.h> 13#include <net/netlink.h>
14#include <net/flow_offload.h>
14 15
15struct module; 16struct module;
16 17
@@ -420,8 +421,7 @@ struct nft_set {
420 unsigned char *udata; 421 unsigned char *udata;
421 /* runtime data below here */ 422 /* runtime data below here */
422 const struct nft_set_ops *ops ____cacheline_aligned; 423 const struct nft_set_ops *ops ____cacheline_aligned;
423 u16 flags:13, 424 u16 flags:14,
424 bound:1,
425 genmask:2; 425 genmask:2;
426 u8 klen; 426 u8 klen;
427 u8 dlen; 427 u8 dlen;
@@ -951,7 +951,7 @@ struct nft_stats {
951 * @stats: per-cpu chain stats 951 * @stats: per-cpu chain stats
952 * @chain: the chain 952 * @chain: the chain
953 * @dev_name: device name that this base chain is attached to (if any) 953 * @dev_name: device name that this base chain is attached to (if any)
954 * @cb_list: list of flow block callbacks (for hardware offload) 954 * @flow_block: flow block (for hardware offload)
955 */ 955 */
956struct nft_base_chain { 956struct nft_base_chain {
957 struct nf_hook_ops ops; 957 struct nf_hook_ops ops;
@@ -961,7 +961,7 @@ struct nft_base_chain {
961 struct nft_stats __percpu *stats; 961 struct nft_stats __percpu *stats;
962 struct nft_chain chain; 962 struct nft_chain chain;
963 char dev_name[IFNAMSIZ]; 963 char dev_name[IFNAMSIZ];
964 struct list_head cb_list; 964 struct flow_block flow_block;
965}; 965};
966 966
967static inline struct nft_base_chain *nft_base_chain(const struct nft_chain *chain) 967static inline struct nft_base_chain *nft_base_chain(const struct nft_chain *chain)
@@ -1347,12 +1347,15 @@ struct nft_trans_rule {
1347struct nft_trans_set { 1347struct nft_trans_set {
1348 struct nft_set *set; 1348 struct nft_set *set;
1349 u32 set_id; 1349 u32 set_id;
1350 bool bound;
1350}; 1351};
1351 1352
1352#define nft_trans_set(trans) \ 1353#define nft_trans_set(trans) \
1353 (((struct nft_trans_set *)trans->data)->set) 1354 (((struct nft_trans_set *)trans->data)->set)
1354#define nft_trans_set_id(trans) \ 1355#define nft_trans_set_id(trans) \
1355 (((struct nft_trans_set *)trans->data)->set_id) 1356 (((struct nft_trans_set *)trans->data)->set_id)
1357#define nft_trans_set_bound(trans) \
1358 (((struct nft_trans_set *)trans->data)->bound)
1356 1359
1357struct nft_trans_chain { 1360struct nft_trans_chain {
1358 bool update; 1361 bool update;
@@ -1383,12 +1386,15 @@ struct nft_trans_table {
1383struct nft_trans_elem { 1386struct nft_trans_elem {
1384 struct nft_set *set; 1387 struct nft_set *set;
1385 struct nft_set_elem elem; 1388 struct nft_set_elem elem;
1389 bool bound;
1386}; 1390};
1387 1391
1388#define nft_trans_elem_set(trans) \ 1392#define nft_trans_elem_set(trans) \
1389 (((struct nft_trans_elem *)trans->data)->set) 1393 (((struct nft_trans_elem *)trans->data)->set)
1390#define nft_trans_elem(trans) \ 1394#define nft_trans_elem(trans) \
1391 (((struct nft_trans_elem *)trans->data)->elem) 1395 (((struct nft_trans_elem *)trans->data)->elem)
1396#define nft_trans_elem_set_bound(trans) \
1397 (((struct nft_trans_elem *)trans->data)->bound)
1392 1398
1393struct nft_trans_obj { 1399struct nft_trans_obj {
1394 struct nft_object *obj; 1400 struct nft_object *obj;
diff --git a/include/net/netfilter/nf_tables_offload.h b/include/net/netfilter/nf_tables_offload.h
index 3196663a10e3..c8b9dec376f5 100644
--- a/include/net/netfilter/nf_tables_offload.h
+++ b/include/net/netfilter/nf_tables_offload.h
@@ -73,4 +73,6 @@ int nft_flow_rule_offload_commit(struct net *net);
73 (__reg)->key = __key; \ 73 (__reg)->key = __key; \
74 memset(&(__reg)->mask, 0xff, (__reg)->len); 74 memset(&(__reg)->mask, 0xff, (__reg)->len);
75 75
76int nft_chain_offload_priority(struct nft_base_chain *basechain);
77
76#endif 78#endif
diff --git a/include/net/netlink.h b/include/net/netlink.h
index e4650e5b64a1..b140c8f1be22 100644
--- a/include/net/netlink.h
+++ b/include/net/netlink.h
@@ -684,9 +684,8 @@ static inline int nlmsg_parse(const struct nlmsghdr *nlh, int hdrlen,
684 const struct nla_policy *policy, 684 const struct nla_policy *policy,
685 struct netlink_ext_ack *extack) 685 struct netlink_ext_ack *extack)
686{ 686{
687 return __nla_parse(tb, maxtype, nlmsg_attrdata(nlh, hdrlen), 687 return __nlmsg_parse(nlh, hdrlen, tb, maxtype, policy,
688 nlmsg_attrlen(nlh, hdrlen), policy, 688 NL_VALIDATE_STRICT, extack);
689 NL_VALIDATE_STRICT, extack);
690} 689}
691 690
692/** 691/**
diff --git a/include/net/nexthop.h b/include/net/nexthop.h
index 25f1f9a8419b..95f766c31c90 100644
--- a/include/net/nexthop.h
+++ b/include/net/nexthop.h
@@ -141,12 +141,6 @@ static inline unsigned int nexthop_num_path(const struct nexthop *nh)
141 141
142 nh_grp = rcu_dereference_rtnl(nh->nh_grp); 142 nh_grp = rcu_dereference_rtnl(nh->nh_grp);
143 rc = nh_grp->num_nh; 143 rc = nh_grp->num_nh;
144 } else {
145 const struct nh_info *nhi;
146
147 nhi = rcu_dereference_rtnl(nh->nh_info);
148 if (nhi->reject_nh)
149 rc = 0;
150 } 144 }
151 145
152 return rc; 146 return rc;
diff --git a/include/net/pkt_cls.h b/include/net/pkt_cls.h
index 841faadceb6e..98be18ef1ed3 100644
--- a/include/net/pkt_cls.h
+++ b/include/net/pkt_cls.h
@@ -6,7 +6,6 @@
6#include <linux/workqueue.h> 6#include <linux/workqueue.h>
7#include <net/sch_generic.h> 7#include <net/sch_generic.h>
8#include <net/act_api.h> 8#include <net/act_api.h>
9#include <net/flow_offload.h>
10#include <net/net_namespace.h> 9#include <net/net_namespace.h>
11 10
12/* TC action not accessible from user space */ 11/* TC action not accessible from user space */
@@ -126,14 +125,14 @@ static inline struct Qdisc *tcf_block_q(struct tcf_block *block)
126} 125}
127 126
128static inline 127static inline
129int tc_setup_cb_block_register(struct tcf_block *block, tc_setup_cb_t *cb, 128int tc_setup_cb_block_register(struct tcf_block *block, flow_setup_cb_t *cb,
130 void *cb_priv) 129 void *cb_priv)
131{ 130{
132 return 0; 131 return 0;
133} 132}
134 133
135static inline 134static inline
136void tc_setup_cb_block_unregister(struct tcf_block *block, tc_setup_cb_t *cb, 135void tc_setup_cb_block_unregister(struct tcf_block *block, flow_setup_cb_t *cb,
137 void *cb_priv) 136 void *cb_priv)
138{ 137{
139} 138}
@@ -647,7 +646,7 @@ tc_cls_common_offload_init(struct flow_cls_common_offload *cls_common,
647{ 646{
648 cls_common->chain_index = tp->chain->index; 647 cls_common->chain_index = tp->chain->index;
649 cls_common->protocol = tp->protocol; 648 cls_common->protocol = tp->protocol;
650 cls_common->prio = tp->prio; 649 cls_common->prio = tp->prio >> 16;
651 if (tc_skip_sw(flags) || flags & TCA_CLS_FLAGS_VERBOSE) 650 if (tc_skip_sw(flags) || flags & TCA_CLS_FLAGS_VERBOSE)
652 cls_common->extack = extack; 651 cls_common->extack = extack;
653} 652}
diff --git a/include/net/psample.h b/include/net/psample.h
index 37a4df2325b2..6b578ce69cd8 100644
--- a/include/net/psample.h
+++ b/include/net/psample.h
@@ -11,6 +11,7 @@ struct psample_group {
11 u32 group_num; 11 u32 group_num;
12 u32 refcount; 12 u32 refcount;
13 u32 seq; 13 u32 seq;
14 struct rcu_head rcu;
14}; 15};
15 16
16struct psample_group *psample_group_get(struct net *net, u32 group_num); 17struct psample_group *psample_group_get(struct net *net, u32 group_num);
diff --git a/include/net/route.h b/include/net/route.h
index 630a0493f1f3..dfce19c9fa96 100644
--- a/include/net/route.h
+++ b/include/net/route.h
@@ -233,7 +233,7 @@ void rt_del_uncached_list(struct rtable *rt);
233 233
234int fib_dump_info_fnhe(struct sk_buff *skb, struct netlink_callback *cb, 234int fib_dump_info_fnhe(struct sk_buff *skb, struct netlink_callback *cb,
235 u32 table_id, struct fib_info *fi, 235 u32 table_id, struct fib_info *fi,
236 int *fa_index, int fa_start); 236 int *fa_index, int fa_start, unsigned int flags);
237 237
238static inline void ip_rt_put(struct rtable *rt) 238static inline void ip_rt_put(struct rtable *rt)
239{ 239{
diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h
index 855167bbc372..6b6b01234dd9 100644
--- a/include/net/sch_generic.h
+++ b/include/net/sch_generic.h
@@ -15,6 +15,7 @@
15#include <linux/mutex.h> 15#include <linux/mutex.h>
16#include <net/gen_stats.h> 16#include <net/gen_stats.h>
17#include <net/rtnetlink.h> 17#include <net/rtnetlink.h>
18#include <net/flow_offload.h>
18 19
19struct Qdisc_ops; 20struct Qdisc_ops;
20struct qdisc_walker; 21struct qdisc_walker;
@@ -22,9 +23,6 @@ struct tcf_walker;
22struct module; 23struct module;
23struct bpf_flow_keys; 24struct bpf_flow_keys;
24 25
25typedef int tc_setup_cb_t(enum tc_setup_type type,
26 void *type_data, void *cb_priv);
27
28typedef int tc_indr_block_bind_cb_t(struct net_device *dev, void *cb_priv, 26typedef int tc_indr_block_bind_cb_t(struct net_device *dev, void *cb_priv,
29 enum tc_setup_type type, void *type_data); 27 enum tc_setup_type type, void *type_data);
30 28
@@ -313,7 +311,7 @@ struct tcf_proto_ops {
313 void (*walk)(struct tcf_proto *tp, 311 void (*walk)(struct tcf_proto *tp,
314 struct tcf_walker *arg, bool rtnl_held); 312 struct tcf_walker *arg, bool rtnl_held);
315 int (*reoffload)(struct tcf_proto *tp, bool add, 313 int (*reoffload)(struct tcf_proto *tp, bool add,
316 tc_setup_cb_t *cb, void *cb_priv, 314 flow_setup_cb_t *cb, void *cb_priv,
317 struct netlink_ext_ack *extack); 315 struct netlink_ext_ack *extack);
318 void (*bind_class)(void *, u32, unsigned long); 316 void (*bind_class)(void *, u32, unsigned long);
319 void * (*tmplt_create)(struct net *net, 317 void * (*tmplt_create)(struct net *net,
@@ -401,7 +399,7 @@ struct tcf_block {
401 refcount_t refcnt; 399 refcount_t refcnt;
402 struct net *net; 400 struct net *net;
403 struct Qdisc *q; 401 struct Qdisc *q;
404 struct list_head cb_list; 402 struct flow_block flow_block;
405 struct list_head owner_list; 403 struct list_head owner_list;
406 bool keep_dst; 404 bool keep_dst;
407 unsigned int offloadcnt; /* Number of oddloaded filters */ 405 unsigned int offloadcnt; /* Number of oddloaded filters */
diff --git a/include/net/sock.h b/include/net/sock.h
index 228db3998e46..2c53f1a1d905 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -2482,6 +2482,7 @@ static inline bool sk_fullsock(const struct sock *sk)
2482 2482
2483/* Checks if this SKB belongs to an HW offloaded socket 2483/* Checks if this SKB belongs to an HW offloaded socket
2484 * and whether any SW fallbacks are required based on dev. 2484 * and whether any SW fallbacks are required based on dev.
2485 * Check decrypted mark in case skb_orphan() cleared socket.
2485 */ 2486 */
2486static inline struct sk_buff *sk_validate_xmit_skb(struct sk_buff *skb, 2487static inline struct sk_buff *sk_validate_xmit_skb(struct sk_buff *skb,
2487 struct net_device *dev) 2488 struct net_device *dev)
@@ -2489,8 +2490,15 @@ static inline struct sk_buff *sk_validate_xmit_skb(struct sk_buff *skb,
2489#ifdef CONFIG_SOCK_VALIDATE_XMIT 2490#ifdef CONFIG_SOCK_VALIDATE_XMIT
2490 struct sock *sk = skb->sk; 2491 struct sock *sk = skb->sk;
2491 2492
2492 if (sk && sk_fullsock(sk) && sk->sk_validate_xmit_skb) 2493 if (sk && sk_fullsock(sk) && sk->sk_validate_xmit_skb) {
2493 skb = sk->sk_validate_xmit_skb(sk, dev, skb); 2494 skb = sk->sk_validate_xmit_skb(sk, dev, skb);
2495#ifdef CONFIG_TLS_DEVICE
2496 } else if (unlikely(skb->decrypted)) {
2497 pr_warn_ratelimited("unencrypted skb with no associated socket - dropping\n");
2498 kfree_skb(skb);
2499 skb = NULL;
2500#endif
2501 }
2494#endif 2502#endif
2495 2503
2496 return skb; 2504 return skb;
diff --git a/include/net/tc_act/tc_police.h b/include/net/tc_act/tc_police.h
index 8b9ef3664262..cfdc7cb82cad 100644
--- a/include/net/tc_act/tc_police.h
+++ b/include/net/tc_act/tc_police.h
@@ -54,7 +54,7 @@ static inline u64 tcf_police_rate_bytes_ps(const struct tc_action *act)
54 struct tcf_police *police = to_police(act); 54 struct tcf_police *police = to_police(act);
55 struct tcf_police_params *params; 55 struct tcf_police_params *params;
56 56
57 params = rcu_dereference_bh(police->params); 57 params = rcu_dereference_bh_rtnl(police->params);
58 return params->rate.rate_bytes_ps; 58 return params->rate.rate_bytes_ps;
59} 59}
60 60
@@ -63,7 +63,7 @@ static inline s64 tcf_police_tcfp_burst(const struct tc_action *act)
63 struct tcf_police *police = to_police(act); 63 struct tcf_police *police = to_police(act);
64 struct tcf_police_params *params; 64 struct tcf_police_params *params;
65 65
66 params = rcu_dereference_bh(police->params); 66 params = rcu_dereference_bh_rtnl(police->params);
67 return params->tcfp_burst; 67 return params->tcfp_burst;
68} 68}
69 69
diff --git a/include/net/tc_act/tc_sample.h b/include/net/tc_act/tc_sample.h
index 0a559d4b6f0f..b4fce0fae645 100644
--- a/include/net/tc_act/tc_sample.h
+++ b/include/net/tc_act/tc_sample.h
@@ -44,7 +44,7 @@ static inline int tcf_sample_trunc_size(const struct tc_action *a)
44static inline struct psample_group * 44static inline struct psample_group *
45tcf_sample_psample_group(const struct tc_action *a) 45tcf_sample_psample_group(const struct tc_action *a)
46{ 46{
47 return rcu_dereference(to_sample(a)->psample_group); 47 return rcu_dereference_rtnl(to_sample(a)->psample_group);
48} 48}
49 49
50#endif /* __NET_TC_SAMPLE_H */ 50#endif /* __NET_TC_SAMPLE_H */
diff --git a/include/net/tcp.h b/include/net/tcp.h
index f42d300f0cfa..81e8ade1e6e4 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -1709,6 +1709,11 @@ static inline struct sk_buff *tcp_rtx_queue_head(const struct sock *sk)
1709 return skb_rb_first(&sk->tcp_rtx_queue); 1709 return skb_rb_first(&sk->tcp_rtx_queue);
1710} 1710}
1711 1711
1712static inline struct sk_buff *tcp_rtx_queue_tail(const struct sock *sk)
1713{
1714 return skb_rb_last(&sk->tcp_rtx_queue);
1715}
1716
1712static inline struct sk_buff *tcp_write_queue_head(const struct sock *sk) 1717static inline struct sk_buff *tcp_write_queue_head(const struct sock *sk)
1713{ 1718{
1714 return skb_peek(&sk->sk_write_queue); 1719 return skb_peek(&sk->sk_write_queue);
@@ -2103,6 +2108,8 @@ struct tcp_ulp_ops {
2103 2108
2104 /* initialize ulp */ 2109 /* initialize ulp */
2105 int (*init)(struct sock *sk); 2110 int (*init)(struct sock *sk);
2111 /* update ulp */
2112 void (*update)(struct sock *sk, struct proto *p);
2106 /* cleanup ulp */ 2113 /* cleanup ulp */
2107 void (*release)(struct sock *sk); 2114 void (*release)(struct sock *sk);
2108 2115
@@ -2114,6 +2121,7 @@ void tcp_unregister_ulp(struct tcp_ulp_ops *type);
2114int tcp_set_ulp(struct sock *sk, const char *name); 2121int tcp_set_ulp(struct sock *sk, const char *name);
2115void tcp_get_available_ulp(char *buf, size_t len); 2122void tcp_get_available_ulp(char *buf, size_t len);
2116void tcp_cleanup_ulp(struct sock *sk); 2123void tcp_cleanup_ulp(struct sock *sk);
2124void tcp_update_ulp(struct sock *sk, struct proto *p);
2117 2125
2118#define MODULE_ALIAS_TCP_ULP(name) \ 2126#define MODULE_ALIAS_TCP_ULP(name) \
2119 __MODULE_INFO(alias, alias_userspace, name); \ 2127 __MODULE_INFO(alias, alias_userspace, name); \
diff --git a/include/net/tls.h b/include/net/tls.h
index 584609174fe0..41b2d41bb1b8 100644
--- a/include/net/tls.h
+++ b/include/net/tls.h
@@ -107,9 +107,7 @@ struct tls_device {
107enum { 107enum {
108 TLS_BASE, 108 TLS_BASE,
109 TLS_SW, 109 TLS_SW,
110#ifdef CONFIG_TLS_DEVICE
111 TLS_HW, 110 TLS_HW,
112#endif
113 TLS_HW_RECORD, 111 TLS_HW_RECORD,
114 TLS_NUM_CONFIG, 112 TLS_NUM_CONFIG,
115}; 113};
@@ -162,6 +160,7 @@ struct tls_sw_context_tx {
162 int async_capable; 160 int async_capable;
163 161
164#define BIT_TX_SCHEDULED 0 162#define BIT_TX_SCHEDULED 0
163#define BIT_TX_CLOSING 1
165 unsigned long tx_bitmask; 164 unsigned long tx_bitmask;
166}; 165};
167 166
@@ -272,6 +271,8 @@ struct tls_context {
272 unsigned long flags; 271 unsigned long flags;
273 272
274 /* cache cold stuff */ 273 /* cache cold stuff */
274 struct proto *sk_proto;
275
275 void (*sk_destruct)(struct sock *sk); 276 void (*sk_destruct)(struct sock *sk);
276 void (*sk_proto_close)(struct sock *sk, long timeout); 277 void (*sk_proto_close)(struct sock *sk, long timeout);
277 278
@@ -355,13 +356,17 @@ int tls_sk_attach(struct sock *sk, int optname, char __user *optval,
355 unsigned int optlen); 356 unsigned int optlen);
356 357
357int tls_set_sw_offload(struct sock *sk, struct tls_context *ctx, int tx); 358int tls_set_sw_offload(struct sock *sk, struct tls_context *ctx, int tx);
359void tls_sw_strparser_arm(struct sock *sk, struct tls_context *ctx);
360void tls_sw_strparser_done(struct tls_context *tls_ctx);
358int tls_sw_sendmsg(struct sock *sk, struct msghdr *msg, size_t size); 361int tls_sw_sendmsg(struct sock *sk, struct msghdr *msg, size_t size);
359int tls_sw_sendpage(struct sock *sk, struct page *page, 362int tls_sw_sendpage(struct sock *sk, struct page *page,
360 int offset, size_t size, int flags); 363 int offset, size_t size, int flags);
361void tls_sw_close(struct sock *sk, long timeout); 364void tls_sw_cancel_work_tx(struct tls_context *tls_ctx);
362void tls_sw_free_resources_tx(struct sock *sk); 365void tls_sw_release_resources_tx(struct sock *sk);
366void tls_sw_free_ctx_tx(struct tls_context *tls_ctx);
363void tls_sw_free_resources_rx(struct sock *sk); 367void tls_sw_free_resources_rx(struct sock *sk);
364void tls_sw_release_resources_rx(struct sock *sk); 368void tls_sw_release_resources_rx(struct sock *sk);
369void tls_sw_free_ctx_rx(struct tls_context *tls_ctx);
365int tls_sw_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, 370int tls_sw_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
366 int nonblock, int flags, int *addr_len); 371 int nonblock, int flags, int *addr_len);
367bool tls_sw_stream_read(const struct sock *sk); 372bool tls_sw_stream_read(const struct sock *sk);
diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h
index c5f8a9f17063..4f225175cb91 100644
--- a/include/rdma/ib_verbs.h
+++ b/include/rdma/ib_verbs.h
@@ -2647,7 +2647,9 @@ struct ib_client {
2647 const union ib_gid *gid, 2647 const union ib_gid *gid,
2648 const struct sockaddr *addr, 2648 const struct sockaddr *addr,
2649 void *client_data); 2649 void *client_data);
2650 struct list_head list; 2650
2651 refcount_t uses;
2652 struct completion uses_zero;
2651 u32 client_id; 2653 u32 client_id;
2652 2654
2653 /* kverbs are not required by the client */ 2655 /* kverbs are not required by the client */
diff --git a/include/rdma/rdmavt_qp.h b/include/rdma/rdmavt_qp.h
index 0eeea520a853..e06c77d76463 100644
--- a/include/rdma/rdmavt_qp.h
+++ b/include/rdma/rdmavt_qp.h
@@ -608,7 +608,7 @@ static inline void rvt_qp_wqe_reserve(
608/** 608/**
609 * rvt_qp_wqe_unreserve - clean reserved operation 609 * rvt_qp_wqe_unreserve - clean reserved operation
610 * @qp - the rvt qp 610 * @qp - the rvt qp
611 * @wqe - the send wqe 611 * @flags - send wqe flags
612 * 612 *
613 * This decrements the reserve use count. 613 * This decrements the reserve use count.
614 * 614 *
@@ -620,11 +620,9 @@ static inline void rvt_qp_wqe_reserve(
620 * the compiler does not juggle the order of the s_last 620 * the compiler does not juggle the order of the s_last
621 * ring index and the decrementing of s_reserved_used. 621 * ring index and the decrementing of s_reserved_used.
622 */ 622 */
623static inline void rvt_qp_wqe_unreserve( 623static inline void rvt_qp_wqe_unreserve(struct rvt_qp *qp, int flags)
624 struct rvt_qp *qp,
625 struct rvt_swqe *wqe)
626{ 624{
627 if (unlikely(wqe->wr.send_flags & RVT_SEND_RESERVE_USED)) { 625 if (unlikely(flags & RVT_SEND_RESERVE_USED)) {
628 atomic_dec(&qp->s_reserved_used); 626 atomic_dec(&qp->s_reserved_used);
629 /* insure no compiler re-order up to s_last change */ 627 /* insure no compiler re-order up to s_last change */
630 smp_mb__after_atomic(); 628 smp_mb__after_atomic();
@@ -853,6 +851,7 @@ rvt_qp_complete_swqe(struct rvt_qp *qp,
853 u32 byte_len, last; 851 u32 byte_len, last;
854 int flags = wqe->wr.send_flags; 852 int flags = wqe->wr.send_flags;
855 853
854 rvt_qp_wqe_unreserve(qp, flags);
856 rvt_put_qp_swqe(qp, wqe); 855 rvt_put_qp_swqe(qp, wqe);
857 856
858 need_completion = 857 need_completion =
diff --git a/include/rdma/restrack.h b/include/rdma/restrack.h
index b0fc6b26bdf5..83df1ec6664e 100644
--- a/include/rdma/restrack.h
+++ b/include/rdma/restrack.h
@@ -105,8 +105,7 @@ struct rdma_restrack_entry {
105}; 105};
106 106
107int rdma_restrack_count(struct ib_device *dev, 107int rdma_restrack_count(struct ib_device *dev,
108 enum rdma_restrack_type type, 108 enum rdma_restrack_type type);
109 struct pid_namespace *ns);
110 109
111void rdma_restrack_kadd(struct rdma_restrack_entry *res); 110void rdma_restrack_kadd(struct rdma_restrack_entry *res);
112void rdma_restrack_uadd(struct rdma_restrack_entry *res); 111void rdma_restrack_uadd(struct rdma_restrack_entry *res);
diff --git a/include/scsi/libfc.h b/include/scsi/libfc.h
index 2d64b53f947c..9b87e1a1c646 100644
--- a/include/scsi/libfc.h
+++ b/include/scsi/libfc.h
@@ -115,7 +115,7 @@ struct fc_disc_port {
115 struct fc_lport *lp; 115 struct fc_lport *lp;
116 struct list_head peers; 116 struct list_head peers;
117 struct work_struct rport_work; 117 struct work_struct rport_work;
118 u32 port_id; 118 u32 port_id;
119}; 119};
120 120
121/** 121/**
@@ -155,14 +155,14 @@ struct fc_rport_operations {
155 */ 155 */
156struct fc_rport_libfc_priv { 156struct fc_rport_libfc_priv {
157 struct fc_lport *local_port; 157 struct fc_lport *local_port;
158 enum fc_rport_state rp_state; 158 enum fc_rport_state rp_state;
159 u16 flags; 159 u16 flags;
160 #define FC_RP_FLAGS_REC_SUPPORTED (1 << 0) 160 #define FC_RP_FLAGS_REC_SUPPORTED (1 << 0)
161 #define FC_RP_FLAGS_RETRY (1 << 1) 161 #define FC_RP_FLAGS_RETRY (1 << 1)
162 #define FC_RP_STARTED (1 << 2) 162 #define FC_RP_STARTED (1 << 2)
163 #define FC_RP_FLAGS_CONF_REQ (1 << 3) 163 #define FC_RP_FLAGS_CONF_REQ (1 << 3)
164 unsigned int e_d_tov; 164 unsigned int e_d_tov;
165 unsigned int r_a_tov; 165 unsigned int r_a_tov;
166}; 166};
167 167
168/** 168/**
@@ -191,24 +191,24 @@ struct fc_rport_priv {
191 struct fc_lport *local_port; 191 struct fc_lport *local_port;
192 struct fc_rport *rport; 192 struct fc_rport *rport;
193 struct kref kref; 193 struct kref kref;
194 enum fc_rport_state rp_state; 194 enum fc_rport_state rp_state;
195 struct fc_rport_identifiers ids; 195 struct fc_rport_identifiers ids;
196 u16 flags; 196 u16 flags;
197 u16 max_seq; 197 u16 max_seq;
198 u16 disc_id; 198 u16 disc_id;
199 u16 maxframe_size; 199 u16 maxframe_size;
200 unsigned int retries; 200 unsigned int retries;
201 unsigned int major_retries; 201 unsigned int major_retries;
202 unsigned int e_d_tov; 202 unsigned int e_d_tov;
203 unsigned int r_a_tov; 203 unsigned int r_a_tov;
204 struct mutex rp_mutex; 204 struct mutex rp_mutex;
205 struct delayed_work retry_work; 205 struct delayed_work retry_work;
206 enum fc_rport_event event; 206 enum fc_rport_event event;
207 struct fc_rport_operations *ops; 207 struct fc_rport_operations *ops;
208 struct list_head peers; 208 struct list_head peers;
209 struct work_struct event_work; 209 struct work_struct event_work;
210 u32 supported_classes; 210 u32 supported_classes;
211 u16 prli_count; 211 u16 prli_count;
212 struct rcu_head rcu; 212 struct rcu_head rcu;
213 u16 sp_features; 213 u16 sp_features;
214 u8 spp_type; 214 u8 spp_type;
@@ -618,12 +618,12 @@ struct libfc_function_template {
618 * @disc_callback: Callback routine called when discovery completes 618 * @disc_callback: Callback routine called when discovery completes
619 */ 619 */
620struct fc_disc { 620struct fc_disc {
621 unsigned char retry_count; 621 unsigned char retry_count;
622 unsigned char pending; 622 unsigned char pending;
623 unsigned char requested; 623 unsigned char requested;
624 unsigned short seq_count; 624 unsigned short seq_count;
625 unsigned char buf_len; 625 unsigned char buf_len;
626 u16 disc_id; 626 u16 disc_id;
627 627
628 struct list_head rports; 628 struct list_head rports;
629 void *priv; 629 void *priv;
@@ -697,7 +697,7 @@ struct fc_lport {
697 struct fc_rport_priv *ms_rdata; 697 struct fc_rport_priv *ms_rdata;
698 struct fc_rport_priv *ptp_rdata; 698 struct fc_rport_priv *ptp_rdata;
699 void *scsi_priv; 699 void *scsi_priv;
700 struct fc_disc disc; 700 struct fc_disc disc;
701 701
702 /* Virtual port information */ 702 /* Virtual port information */
703 struct list_head vports; 703 struct list_head vports;
@@ -715,7 +715,7 @@ struct fc_lport {
715 u8 retry_count; 715 u8 retry_count;
716 716
717 /* Fabric information */ 717 /* Fabric information */
718 u32 port_id; 718 u32 port_id;
719 u64 wwpn; 719 u64 wwpn;
720 u64 wwnn; 720 u64 wwnn;
721 unsigned int service_params; 721 unsigned int service_params;
@@ -743,11 +743,11 @@ struct fc_lport {
743 struct fc_ns_fts fcts; 743 struct fc_ns_fts fcts;
744 744
745 /* Miscellaneous */ 745 /* Miscellaneous */
746 struct mutex lp_mutex; 746 struct mutex lp_mutex;
747 struct list_head list; 747 struct list_head list;
748 struct delayed_work retry_work; 748 struct delayed_work retry_work;
749 void *prov[FC_FC4_PROV_SIZE]; 749 void *prov[FC_FC4_PROV_SIZE];
750 struct list_head lport_list; 750 struct list_head lport_list;
751}; 751};
752 752
753/** 753/**
diff --git a/include/scsi/libfcoe.h b/include/scsi/libfcoe.h
index c50fb297e265..2568cb0627ec 100644
--- a/include/scsi/libfcoe.h
+++ b/include/scsi/libfcoe.h
@@ -31,7 +31,7 @@
31 * FIP tunable parameters. 31 * FIP tunable parameters.
32 */ 32 */
33#define FCOE_CTLR_START_DELAY 2000 /* mS after first adv. to choose FCF */ 33#define FCOE_CTLR_START_DELAY 2000 /* mS after first adv. to choose FCF */
34#define FCOE_CTRL_SOL_TOV 2000 /* min. solicitation interval (mS) */ 34#define FCOE_CTLR_SOL_TOV 2000 /* min. solicitation interval (mS) */
35#define FCOE_CTLR_FCF_LIMIT 20 /* max. number of FCF entries */ 35#define FCOE_CTLR_FCF_LIMIT 20 /* max. number of FCF entries */
36#define FCOE_CTLR_VN2VN_LOGIN_LIMIT 3 /* max. VN2VN rport login retries */ 36#define FCOE_CTLR_VN2VN_LOGIN_LIMIT 3 /* max. VN2VN rport login retries */
37 37
@@ -229,6 +229,7 @@ struct fcoe_fcf {
229 * @vn_mac: VN_Node assigned MAC address for data 229 * @vn_mac: VN_Node assigned MAC address for data
230 */ 230 */
231struct fcoe_rport { 231struct fcoe_rport {
232 struct fc_rport_priv rdata;
232 unsigned long time; 233 unsigned long time;
233 u16 fcoe_len; 234 u16 fcoe_len;
234 u16 flags; 235 u16 flags;
diff --git a/include/soc/arc/mcip.h b/include/soc/arc/mcip.h
index 50f49e043668..d1a93c73f006 100644
--- a/include/soc/arc/mcip.h
+++ b/include/soc/arc/mcip.h
@@ -46,7 +46,9 @@ struct mcip_cmd {
46#define CMD_IDU_ENABLE 0x71 46#define CMD_IDU_ENABLE 0x71
47#define CMD_IDU_DISABLE 0x72 47#define CMD_IDU_DISABLE 0x72
48#define CMD_IDU_SET_MODE 0x74 48#define CMD_IDU_SET_MODE 0x74
49#define CMD_IDU_READ_MODE 0x75
49#define CMD_IDU_SET_DEST 0x76 50#define CMD_IDU_SET_DEST 0x76
51#define CMD_IDU_ACK_CIRQ 0x79
50#define CMD_IDU_SET_MASK 0x7C 52#define CMD_IDU_SET_MASK 0x7C
51 53
52#define IDU_M_TRIG_LEVEL 0x0 54#define IDU_M_TRIG_LEVEL 0x0
@@ -119,4 +121,13 @@ static inline void __mcip_cmd_data(unsigned int cmd, unsigned int param,
119 __mcip_cmd(cmd, param); 121 __mcip_cmd(cmd, param);
120} 122}
121 123
124/*
125 * Read MCIP register
126 */
127static inline unsigned int __mcip_cmd_read(unsigned int cmd, unsigned int param)
128{
129 __mcip_cmd(cmd, param);
130 return read_aux_reg(ARC_REG_MCIP_READBACK);
131}
132
122#endif 133#endif
diff --git a/include/soc/fsl/qe/qe.h b/include/soc/fsl/qe/qe.h
index 3f9d6b6a5691..c1036d16ed03 100644
--- a/include/soc/fsl/qe/qe.h
+++ b/include/soc/fsl/qe/qe.h
@@ -259,7 +259,7 @@ static inline int qe_alive_during_sleep(void)
259 259
260/* Structure that defines QE firmware binary files. 260/* Structure that defines QE firmware binary files.
261 * 261 *
262 * See Documentation/powerpc/qe_firmware.txt for a description of these 262 * See Documentation/powerpc/qe_firmware.rst for a description of these
263 * fields. 263 * fields.
264 */ 264 */
265struct qe_firmware { 265struct qe_firmware {
diff --git a/include/sound/compress_driver.h b/include/sound/compress_driver.h
index c5188ff724d1..bc88d6f964da 100644
--- a/include/sound/compress_driver.h
+++ b/include/sound/compress_driver.h
@@ -173,10 +173,7 @@ static inline void snd_compr_drain_notify(struct snd_compr_stream *stream)
173 if (snd_BUG_ON(!stream)) 173 if (snd_BUG_ON(!stream))
174 return; 174 return;
175 175
176 if (stream->direction == SND_COMPRESS_PLAYBACK) 176 stream->runtime->state = SNDRV_PCM_STATE_SETUP;
177 stream->runtime->state = SNDRV_PCM_STATE_SETUP;
178 else
179 stream->runtime->state = SNDRV_PCM_STATE_PREPARED;
180 177
181 wake_up(&stream->runtime->sleep); 178 wake_up(&stream->runtime->sleep);
182} 179}
diff --git a/include/sound/simple_card_utils.h b/include/sound/simple_card_utils.h
index 954563ee2277..985a5f583de4 100644
--- a/include/sound/simple_card_utils.h
+++ b/include/sound/simple_card_utils.h
@@ -141,6 +141,10 @@ inline void asoc_simple_debug_dai(struct asoc_simple_priv *priv,
141{ 141{
142 struct device *dev = simple_priv_to_dev(priv); 142 struct device *dev = simple_priv_to_dev(priv);
143 143
144 /* dai might be NULL */
145 if (!dai)
146 return;
147
144 if (dai->name) 148 if (dai->name)
145 dev_dbg(dev, "%s dai name = %s\n", 149 dev_dbg(dev, "%s dai name = %s\n",
146 name, dai->name); 150 name, dai->name);
diff --git a/include/sound/sof/control.h b/include/sound/sof/control.h
index bded69e696d4..6080ea0facd7 100644
--- a/include/sound/sof/control.h
+++ b/include/sound/sof/control.h
@@ -1,4 +1,4 @@
1/* SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause) */ 1/* SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause) */
2/* 2/*
3 * This file is provided under a dual BSD/GPLv2 license. When using or 3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license. 4 * redistributing this file, you may do so under either license.
diff --git a/include/sound/sof/dai-intel.h b/include/sound/sof/dai-intel.h
index 4bb8ee138ba7..65e4c20e567c 100644
--- a/include/sound/sof/dai-intel.h
+++ b/include/sound/sof/dai-intel.h
@@ -1,4 +1,4 @@
1/* SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause) */ 1/* SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause) */
2/* 2/*
3 * This file is provided under a dual BSD/GPLv2 license. When using or 3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license. 4 * redistributing this file, you may do so under either license.
diff --git a/include/sound/sof/dai.h b/include/sound/sof/dai.h
index 3d174e20aa53..5b8de1b1983c 100644
--- a/include/sound/sof/dai.h
+++ b/include/sound/sof/dai.h
@@ -1,4 +1,4 @@
1/* SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause) */ 1/* SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause) */
2/* 2/*
3 * This file is provided under a dual BSD/GPLv2 license. When using or 3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license. 4 * redistributing this file, you may do so under either license.
diff --git a/include/sound/sof/header.h b/include/sound/sof/header.h
index 12867bbd4372..10f00c08dbb7 100644
--- a/include/sound/sof/header.h
+++ b/include/sound/sof/header.h
@@ -1,4 +1,4 @@
1/* SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause) */ 1/* SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause) */
2/* 2/*
3 * This file is provided under a dual BSD/GPLv2 license. When using or 3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license. 4 * redistributing this file, you may do so under either license.
diff --git a/include/sound/sof/info.h b/include/sound/sof/info.h
index 16528d2b4a50..a9156b4a062c 100644
--- a/include/sound/sof/info.h
+++ b/include/sound/sof/info.h
@@ -1,4 +1,4 @@
1/* SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause) */ 1/* SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause) */
2/* 2/*
3 * This file is provided under a dual BSD/GPLv2 license. When using or 3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license. 4 * redistributing this file, you may do so under either license.
diff --git a/include/sound/sof/pm.h b/include/sound/sof/pm.h
index 8ae3ad45bdf7..003879401d63 100644
--- a/include/sound/sof/pm.h
+++ b/include/sound/sof/pm.h
@@ -1,4 +1,4 @@
1/* SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause) */ 1/* SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause) */
2/* 2/*
3 * This file is provided under a dual BSD/GPLv2 license. When using or 3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license. 4 * redistributing this file, you may do so under either license.
diff --git a/include/sound/sof/stream.h b/include/sound/sof/stream.h
index 643f175cb479..0b71b381b952 100644
--- a/include/sound/sof/stream.h
+++ b/include/sound/sof/stream.h
@@ -1,4 +1,4 @@
1/* SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause) */ 1/* SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause) */
2/* 2/*
3 * This file is provided under a dual BSD/GPLv2 license. When using or 3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license. 4 * redistributing this file, you may do so under either license.
diff --git a/include/sound/sof/topology.h b/include/sound/sof/topology.h
index 41dcabf89899..c47b36240920 100644
--- a/include/sound/sof/topology.h
+++ b/include/sound/sof/topology.h
@@ -1,4 +1,4 @@
1/* SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause) */ 1/* SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause) */
2/* 2/*
3 * This file is provided under a dual BSD/GPLv2 license. When using or 3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license. 4 * redistributing this file, you may do so under either license.
diff --git a/include/sound/sof/trace.h b/include/sound/sof/trace.h
index 9257d5473d97..fda6e8f6ead4 100644
--- a/include/sound/sof/trace.h
+++ b/include/sound/sof/trace.h
@@ -1,4 +1,4 @@
1/* SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause) */ 1/* SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause) */
2/* 2/*
3 * This file is provided under a dual BSD/GPLv2 license. When using or 3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license. 4 * redistributing this file, you may do so under either license.
diff --git a/include/sound/sof/xtensa.h b/include/sound/sof/xtensa.h
index d25c764b10e8..dd53d36b34e1 100644
--- a/include/sound/sof/xtensa.h
+++ b/include/sound/sof/xtensa.h
@@ -1,4 +1,4 @@
1/* SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause) */ 1/* SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause) */
2/* 2/*
3 * This file is provided under a dual BSD/GPLv2 license. When using or 3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license. 4 * redistributing this file, you may do so under either license.
diff --git a/include/trace/events/dma_fence.h b/include/trace/events/dma_fence.h
index 2212adda8f77..64e92d56c6a8 100644
--- a/include/trace/events/dma_fence.h
+++ b/include/trace/events/dma_fence.h
@@ -2,7 +2,7 @@
2#undef TRACE_SYSTEM 2#undef TRACE_SYSTEM
3#define TRACE_SYSTEM dma_fence 3#define TRACE_SYSTEM dma_fence
4 4
5#if !defined(_TRACE_FENCE_H) || defined(TRACE_HEADER_MULTI_READ) 5#if !defined(_TRACE_DMA_FENCE_H) || defined(TRACE_HEADER_MULTI_READ)
6#define _TRACE_DMA_FENCE_H 6#define _TRACE_DMA_FENCE_H
7 7
8#include <linux/tracepoint.h> 8#include <linux/tracepoint.h>
diff --git a/include/trace/events/napi.h b/include/trace/events/napi.h
index f3a12566bed0..6678cf8b235b 100644
--- a/include/trace/events/napi.h
+++ b/include/trace/events/napi.h
@@ -3,7 +3,7 @@
3#define TRACE_SYSTEM napi 3#define TRACE_SYSTEM napi
4 4
5#if !defined(_TRACE_NAPI_H) || defined(TRACE_HEADER_MULTI_READ) 5#if !defined(_TRACE_NAPI_H) || defined(TRACE_HEADER_MULTI_READ)
6#define _TRACE_NAPI_H_ 6#define _TRACE_NAPI_H
7 7
8#include <linux/netdevice.h> 8#include <linux/netdevice.h>
9#include <linux/tracepoint.h> 9#include <linux/tracepoint.h>
@@ -38,7 +38,7 @@ TRACE_EVENT(napi_poll,
38 38
39#undef NO_DEV 39#undef NO_DEV
40 40
41#endif /* _TRACE_NAPI_H_ */ 41#endif /* _TRACE_NAPI_H */
42 42
43/* This part must be outside protection */ 43/* This part must be outside protection */
44#include <trace/define_trace.h> 44#include <trace/define_trace.h>
diff --git a/include/trace/events/qdisc.h b/include/trace/events/qdisc.h
index 60d0d8bd336d..0d1a9ebf55ba 100644
--- a/include/trace/events/qdisc.h
+++ b/include/trace/events/qdisc.h
@@ -2,7 +2,7 @@
2#define TRACE_SYSTEM qdisc 2#define TRACE_SYSTEM qdisc
3 3
4#if !defined(_TRACE_QDISC_H) || defined(TRACE_HEADER_MULTI_READ) 4#if !defined(_TRACE_QDISC_H) || defined(TRACE_HEADER_MULTI_READ)
5#define _TRACE_QDISC_H_ 5#define _TRACE_QDISC_H
6 6
7#include <linux/skbuff.h> 7#include <linux/skbuff.h>
8#include <linux/netdevice.h> 8#include <linux/netdevice.h>
@@ -44,7 +44,7 @@ TRACE_EVENT(qdisc_dequeue,
44 __entry->txq_state, __entry->packets, __entry->skbaddr ) 44 __entry->txq_state, __entry->packets, __entry->skbaddr )
45); 45);
46 46
47#endif /* _TRACE_QDISC_H_ */ 47#endif /* _TRACE_QDISC_H */
48 48
49/* This part must be outside protection */ 49/* This part must be outside protection */
50#include <trace/define_trace.h> 50#include <trace/define_trace.h>
diff --git a/include/trace/events/rxrpc.h b/include/trace/events/rxrpc.h
index cc1d060cbf13..a13a62db3565 100644
--- a/include/trace/events/rxrpc.h
+++ b/include/trace/events/rxrpc.h
@@ -23,20 +23,17 @@
23#define __RXRPC_DECLARE_TRACE_ENUMS_ONCE_ONLY 23#define __RXRPC_DECLARE_TRACE_ENUMS_ONCE_ONLY
24 24
25enum rxrpc_skb_trace { 25enum rxrpc_skb_trace {
26 rxrpc_skb_rx_cleaned, 26 rxrpc_skb_cleaned,
27 rxrpc_skb_rx_freed, 27 rxrpc_skb_freed,
28 rxrpc_skb_rx_got, 28 rxrpc_skb_got,
29 rxrpc_skb_rx_lost, 29 rxrpc_skb_lost,
30 rxrpc_skb_rx_purged, 30 rxrpc_skb_new,
31 rxrpc_skb_rx_received, 31 rxrpc_skb_purged,
32 rxrpc_skb_rx_rotated, 32 rxrpc_skb_received,
33 rxrpc_skb_rx_seen, 33 rxrpc_skb_rotated,
34 rxrpc_skb_tx_cleaned, 34 rxrpc_skb_seen,
35 rxrpc_skb_tx_freed, 35 rxrpc_skb_unshared,
36 rxrpc_skb_tx_got, 36 rxrpc_skb_unshared_nomem,
37 rxrpc_skb_tx_new,
38 rxrpc_skb_tx_rotated,
39 rxrpc_skb_tx_seen,
40}; 37};
41 38
42enum rxrpc_local_trace { 39enum rxrpc_local_trace {
@@ -228,20 +225,17 @@ enum rxrpc_tx_point {
228 * Declare tracing information enums and their string mappings for display. 225 * Declare tracing information enums and their string mappings for display.
229 */ 226 */
230#define rxrpc_skb_traces \ 227#define rxrpc_skb_traces \
231 EM(rxrpc_skb_rx_cleaned, "Rx CLN") \ 228 EM(rxrpc_skb_cleaned, "CLN") \
232 EM(rxrpc_skb_rx_freed, "Rx FRE") \ 229 EM(rxrpc_skb_freed, "FRE") \
233 EM(rxrpc_skb_rx_got, "Rx GOT") \ 230 EM(rxrpc_skb_got, "GOT") \
234 EM(rxrpc_skb_rx_lost, "Rx *L*") \ 231 EM(rxrpc_skb_lost, "*L*") \
235 EM(rxrpc_skb_rx_purged, "Rx PUR") \ 232 EM(rxrpc_skb_new, "NEW") \
236 EM(rxrpc_skb_rx_received, "Rx RCV") \ 233 EM(rxrpc_skb_purged, "PUR") \
237 EM(rxrpc_skb_rx_rotated, "Rx ROT") \ 234 EM(rxrpc_skb_received, "RCV") \
238 EM(rxrpc_skb_rx_seen, "Rx SEE") \ 235 EM(rxrpc_skb_rotated, "ROT") \
239 EM(rxrpc_skb_tx_cleaned, "Tx CLN") \ 236 EM(rxrpc_skb_seen, "SEE") \
240 EM(rxrpc_skb_tx_freed, "Tx FRE") \ 237 EM(rxrpc_skb_unshared, "UNS") \
241 EM(rxrpc_skb_tx_got, "Tx GOT") \ 238 E_(rxrpc_skb_unshared_nomem, "US0")
242 EM(rxrpc_skb_tx_new, "Tx NEW") \
243 EM(rxrpc_skb_tx_rotated, "Tx ROT") \
244 E_(rxrpc_skb_tx_seen, "Tx SEE")
245 239
246#define rxrpc_local_traces \ 240#define rxrpc_local_traces \
247 EM(rxrpc_local_got, "GOT") \ 241 EM(rxrpc_local_got, "GOT") \
@@ -498,10 +492,10 @@ rxrpc_tx_points;
498#define E_(a, b) { a, b } 492#define E_(a, b) { a, b }
499 493
500TRACE_EVENT(rxrpc_local, 494TRACE_EVENT(rxrpc_local,
501 TP_PROTO(struct rxrpc_local *local, enum rxrpc_local_trace op, 495 TP_PROTO(unsigned int local_debug_id, enum rxrpc_local_trace op,
502 int usage, const void *where), 496 int usage, const void *where),
503 497
504 TP_ARGS(local, op, usage, where), 498 TP_ARGS(local_debug_id, op, usage, where),
505 499
506 TP_STRUCT__entry( 500 TP_STRUCT__entry(
507 __field(unsigned int, local ) 501 __field(unsigned int, local )
@@ -511,7 +505,7 @@ TRACE_EVENT(rxrpc_local,
511 ), 505 ),
512 506
513 TP_fast_assign( 507 TP_fast_assign(
514 __entry->local = local->debug_id; 508 __entry->local = local_debug_id;
515 __entry->op = op; 509 __entry->op = op;
516 __entry->usage = usage; 510 __entry->usage = usage;
517 __entry->where = where; 511 __entry->where = where;
@@ -643,13 +637,14 @@ TRACE_EVENT(rxrpc_call,
643 637
644TRACE_EVENT(rxrpc_skb, 638TRACE_EVENT(rxrpc_skb,
645 TP_PROTO(struct sk_buff *skb, enum rxrpc_skb_trace op, 639 TP_PROTO(struct sk_buff *skb, enum rxrpc_skb_trace op,
646 int usage, int mod_count, const void *where), 640 int usage, int mod_count, u8 flags, const void *where),
647 641
648 TP_ARGS(skb, op, usage, mod_count, where), 642 TP_ARGS(skb, op, usage, mod_count, flags, where),
649 643
650 TP_STRUCT__entry( 644 TP_STRUCT__entry(
651 __field(struct sk_buff *, skb ) 645 __field(struct sk_buff *, skb )
652 __field(enum rxrpc_skb_trace, op ) 646 __field(enum rxrpc_skb_trace, op )
647 __field(u8, flags )
653 __field(int, usage ) 648 __field(int, usage )
654 __field(int, mod_count ) 649 __field(int, mod_count )
655 __field(const void *, where ) 650 __field(const void *, where )
@@ -657,14 +652,16 @@ TRACE_EVENT(rxrpc_skb,
657 652
658 TP_fast_assign( 653 TP_fast_assign(
659 __entry->skb = skb; 654 __entry->skb = skb;
655 __entry->flags = flags;
660 __entry->op = op; 656 __entry->op = op;
661 __entry->usage = usage; 657 __entry->usage = usage;
662 __entry->mod_count = mod_count; 658 __entry->mod_count = mod_count;
663 __entry->where = where; 659 __entry->where = where;
664 ), 660 ),
665 661
666 TP_printk("s=%p %s u=%d m=%d p=%pSR", 662 TP_printk("s=%p %cx %s u=%d m=%d p=%pSR",
667 __entry->skb, 663 __entry->skb,
664 __entry->flags & RXRPC_SKB_TX_BUFFER ? 'T' : 'R',
668 __print_symbolic(__entry->op, rxrpc_skb_traces), 665 __print_symbolic(__entry->op, rxrpc_skb_traces),
669 __entry->usage, 666 __entry->usage,
670 __entry->mod_count, 667 __entry->mod_count,
diff --git a/include/trace/events/tegra_apb_dma.h b/include/trace/events/tegra_apb_dma.h
index 0818f6286110..971cd02d2daf 100644
--- a/include/trace/events/tegra_apb_dma.h
+++ b/include/trace/events/tegra_apb_dma.h
@@ -1,5 +1,5 @@
1#if !defined(_TRACE_TEGRA_APB_DMA_H) || defined(TRACE_HEADER_MULTI_READ) 1#if !defined(_TRACE_TEGRA_APB_DMA_H) || defined(TRACE_HEADER_MULTI_READ)
2#define _TRACE_TEGRA_APM_DMA_H 2#define _TRACE_TEGRA_APB_DMA_H
3 3
4#include <linux/tracepoint.h> 4#include <linux/tracepoint.h>
5#include <linux/dmaengine.h> 5#include <linux/dmaengine.h>
@@ -55,7 +55,7 @@ TRACE_EVENT(tegra_dma_isr,
55 TP_printk("%s: irq %d\n", __get_str(chan), __entry->irq) 55 TP_printk("%s: irq %d\n", __get_str(chan), __entry->irq)
56); 56);
57 57
58#endif /* _TRACE_TEGRADMA_H */ 58#endif /* _TRACE_TEGRA_APB_DMA_H */
59 59
60/* This part must be outside protection */ 60/* This part must be outside protection */
61#include <trace/define_trace.h> 61#include <trace/define_trace.h>
diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h
index fa1c753dcdbc..a5aa7d3ac6a1 100644
--- a/include/uapi/linux/bpf.h
+++ b/include/uapi/linux/bpf.h
@@ -1466,8 +1466,8 @@ union bpf_attr {
1466 * If no cookie has been set yet, generate a new cookie. Once 1466 * If no cookie has been set yet, generate a new cookie. Once
1467 * generated, the socket cookie remains stable for the life of the 1467 * generated, the socket cookie remains stable for the life of the
1468 * socket. This helper can be useful for monitoring per socket 1468 * socket. This helper can be useful for monitoring per socket
1469 * networking traffic statistics as it provides a unique socket 1469 * networking traffic statistics as it provides a global socket
1470 * identifier per namespace. 1470 * identifier that can be assumed unique.
1471 * Return 1471 * Return
1472 * A 8-byte long non-decreasing number on success, or 0 if the 1472 * A 8-byte long non-decreasing number on success, or 0 if the
1473 * socket field is missing inside *skb*. 1473 * socket field is missing inside *skb*.
diff --git a/include/uapi/linux/bpfilter.h b/include/uapi/linux/bpfilter.h
index 2ec3cc99ea4c..cbc1f5813f50 100644
--- a/include/uapi/linux/bpfilter.h
+++ b/include/uapi/linux/bpfilter.h
@@ -1,4 +1,4 @@
1/* SPDX-License-Identifier: GPL-2.0 */ 1/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
2#ifndef _UAPI_LINUX_BPFILTER_H 2#ifndef _UAPI_LINUX_BPFILTER_H
3#define _UAPI_LINUX_BPFILTER_H 3#define _UAPI_LINUX_BPFILTER_H
4 4
diff --git a/include/uapi/linux/ipmi_bmc.h b/include/uapi/linux/ipmi_bmc.h
index 1670f0944227..782a03eb1086 100644
--- a/include/uapi/linux/ipmi_bmc.h
+++ b/include/uapi/linux/ipmi_bmc.h
@@ -1,4 +1,4 @@
1/* SPDX-License-Identifier: GPL-2.0 */ 1/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
2/* 2/*
3 * Copyright (c) 2015-2018, Intel Corporation. 3 * Copyright (c) 2015-2018, Intel Corporation.
4 */ 4 */
diff --git a/include/uapi/linux/isst_if.h b/include/uapi/linux/isst_if.h
index d10b832c58c5..0a52b7b093d3 100644
--- a/include/uapi/linux/isst_if.h
+++ b/include/uapi/linux/isst_if.h
@@ -1,4 +1,4 @@
1/* SPDX-License-Identifier: GPL-2.0 */ 1/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
2/* 2/*
3 * Intel Speed Select Interface: OS to hardware Interface 3 * Intel Speed Select Interface: OS to hardware Interface
4 * Copyright (c) 2019, Intel Corporation. 4 * Copyright (c) 2019, Intel Corporation.
diff --git a/include/uapi/linux/jffs2.h b/include/uapi/linux/jffs2.h
index a18b719f49d4..784ba0b9690a 100644
--- a/include/uapi/linux/jffs2.h
+++ b/include/uapi/linux/jffs2.h
@@ -77,11 +77,6 @@
77 77
78#define JFFS2_ACL_VERSION 0x0001 78#define JFFS2_ACL_VERSION 0x0001
79 79
80// Maybe later...
81//#define JFFS2_NODETYPE_CHECKPOINT (JFFS2_FEATURE_RWCOMPAT_DELETE | JFFS2_NODE_ACCURATE | 3)
82//#define JFFS2_NODETYPE_OPTIONS (JFFS2_FEATURE_RWCOMPAT_COPY | JFFS2_NODE_ACCURATE | 4)
83
84
85#define JFFS2_INO_FLAG_PREREAD 1 /* Do read_inode() for this one at 80#define JFFS2_INO_FLAG_PREREAD 1 /* Do read_inode() for this one at
86 mount time, don't wait for it to 81 mount time, don't wait for it to
87 happen later */ 82 happen later */
diff --git a/include/uapi/linux/kfd_ioctl.h b/include/uapi/linux/kfd_ioctl.h
index 070d1bc7e725..20917c59f39c 100644
--- a/include/uapi/linux/kfd_ioctl.h
+++ b/include/uapi/linux/kfd_ioctl.h
@@ -410,21 +410,6 @@ struct kfd_ioctl_unmap_memory_from_gpu_args {
410 __u32 n_success; /* to/from KFD */ 410 __u32 n_success; /* to/from KFD */
411}; 411};
412 412
413/* Allocate GWS for specific queue
414 *
415 * @gpu_id: device identifier
416 * @queue_id: queue's id that GWS is allocated for
417 * @num_gws: how many GWS to allocate
418 * @first_gws: index of the first GWS allocated.
419 * only support contiguous GWS allocation
420 */
421struct kfd_ioctl_alloc_queue_gws_args {
422 __u32 gpu_id; /* to KFD */
423 __u32 queue_id; /* to KFD */
424 __u32 num_gws; /* to KFD */
425 __u32 first_gws; /* from KFD */
426};
427
428struct kfd_ioctl_get_dmabuf_info_args { 413struct kfd_ioctl_get_dmabuf_info_args {
429 __u64 size; /* from KFD */ 414 __u64 size; /* from KFD */
430 __u64 metadata_ptr; /* to KFD */ 415 __u64 metadata_ptr; /* to KFD */
@@ -544,10 +529,7 @@ enum kfd_mmio_remap {
544#define AMDKFD_IOC_IMPORT_DMABUF \ 529#define AMDKFD_IOC_IMPORT_DMABUF \
545 AMDKFD_IOWR(0x1D, struct kfd_ioctl_import_dmabuf_args) 530 AMDKFD_IOWR(0x1D, struct kfd_ioctl_import_dmabuf_args)
546 531
547#define AMDKFD_IOC_ALLOC_QUEUE_GWS \
548 AMDKFD_IOWR(0x1E, struct kfd_ioctl_alloc_queue_gws_args)
549
550#define AMDKFD_COMMAND_START 0x01 532#define AMDKFD_COMMAND_START 0x01
551#define AMDKFD_COMMAND_END 0x1F 533#define AMDKFD_COMMAND_END 0x1E
552 534
553#endif 535#endif
diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h
index a7c19540ce21..5e3f12d5359e 100644
--- a/include/uapi/linux/kvm.h
+++ b/include/uapi/linux/kvm.h
@@ -116,7 +116,7 @@ struct kvm_irq_level {
116 * ACPI gsi notion of irq. 116 * ACPI gsi notion of irq.
117 * For IA-64 (APIC model) IOAPIC0: irq 0-23; IOAPIC1: irq 24-47.. 117 * For IA-64 (APIC model) IOAPIC0: irq 0-23; IOAPIC1: irq 24-47..
118 * For X86 (standard AT mode) PIC0/1: irq 0-15. IOAPIC0: 0-23.. 118 * For X86 (standard AT mode) PIC0/1: irq 0-15. IOAPIC0: 0-23..
119 * For ARM: See Documentation/virtual/kvm/api.txt 119 * For ARM: See Documentation/virt/kvm/api.txt
120 */ 120 */
121 union { 121 union {
122 __u32 irq; 122 __u32 irq;
@@ -1086,7 +1086,7 @@ struct kvm_xen_hvm_config {
1086 * 1086 *
1087 * KVM_IRQFD_FLAG_RESAMPLE indicates resamplefd is valid and specifies 1087 * KVM_IRQFD_FLAG_RESAMPLE indicates resamplefd is valid and specifies
1088 * the irqfd to operate in resampling mode for level triggered interrupt 1088 * the irqfd to operate in resampling mode for level triggered interrupt
1089 * emulation. See Documentation/virtual/kvm/api.txt. 1089 * emulation. See Documentation/virt/kvm/api.txt.
1090 */ 1090 */
1091#define KVM_IRQFD_FLAG_RESAMPLE (1 << 1) 1091#define KVM_IRQFD_FLAG_RESAMPLE (1 << 1)
1092 1092
diff --git a/include/uapi/linux/netfilter/nf_synproxy.h b/include/uapi/linux/netfilter/nf_synproxy.h
index 6f3791c8946f..00d787f0260e 100644
--- a/include/uapi/linux/netfilter/nf_synproxy.h
+++ b/include/uapi/linux/netfilter/nf_synproxy.h
@@ -1,4 +1,4 @@
1/* SPDX-License-Identifier: GPL-2.0 */ 1/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
2#ifndef _NF_SYNPROXY_H 2#ifndef _NF_SYNPROXY_H
3#define _NF_SYNPROXY_H 3#define _NF_SYNPROXY_H
4 4
diff --git a/include/uapi/linux/netfilter/xt_connlabel.h b/include/uapi/linux/netfilter/xt_connlabel.h
index 2312f0ec07b2..323f0dfc2a4e 100644
--- a/include/uapi/linux/netfilter/xt_connlabel.h
+++ b/include/uapi/linux/netfilter/xt_connlabel.h
@@ -1,4 +1,8 @@
1/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ 1/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
2
3#ifndef _UAPI_XT_CONNLABEL_H
4#define _UAPI_XT_CONNLABEL_H
5
2#include <linux/types.h> 6#include <linux/types.h>
3 7
4#define XT_CONNLABEL_MAXBIT 127 8#define XT_CONNLABEL_MAXBIT 127
@@ -11,3 +15,5 @@ struct xt_connlabel_mtinfo {
11 __u16 bit; 15 __u16 bit;
12 __u16 options; 16 __u16 options;
13}; 17};
18
19#endif /* _UAPI_XT_CONNLABEL_H */
diff --git a/include/uapi/linux/netfilter/xt_nfacct.h b/include/uapi/linux/netfilter/xt_nfacct.h
index 5c8a4d760ee3..b5123ab8d54a 100644
--- a/include/uapi/linux/netfilter/xt_nfacct.h
+++ b/include/uapi/linux/netfilter/xt_nfacct.h
@@ -11,4 +11,9 @@ struct xt_nfacct_match_info {
11 struct nf_acct *nfacct; 11 struct nf_acct *nfacct;
12}; 12};
13 13
14struct xt_nfacct_match_info_v1 {
15 char name[NFACCT_NAME_MAX];
16 struct nf_acct *nfacct __attribute__((aligned(8)));
17};
18
14#endif /* _XT_NFACCT_MATCH_H */ 19#endif /* _XT_NFACCT_MATCH_H */
diff --git a/include/uapi/linux/nl80211.h b/include/uapi/linux/nl80211.h
index 75758ec26c8b..beb9a9d0c00a 100644
--- a/include/uapi/linux/nl80211.h
+++ b/include/uapi/linux/nl80211.h
@@ -2863,7 +2863,7 @@ enum nl80211_attrs {
2863#define NL80211_HT_CAPABILITY_LEN 26 2863#define NL80211_HT_CAPABILITY_LEN 26
2864#define NL80211_VHT_CAPABILITY_LEN 12 2864#define NL80211_VHT_CAPABILITY_LEN 12
2865#define NL80211_HE_MIN_CAPABILITY_LEN 16 2865#define NL80211_HE_MIN_CAPABILITY_LEN 16
2866#define NL80211_HE_MAX_CAPABILITY_LEN 51 2866#define NL80211_HE_MAX_CAPABILITY_LEN 54
2867#define NL80211_MAX_NR_CIPHER_SUITES 5 2867#define NL80211_MAX_NR_CIPHER_SUITES 5
2868#define NL80211_MAX_NR_AKM_SUITES 2 2868#define NL80211_MAX_NR_AKM_SUITES 2
2869 2869
diff --git a/include/uapi/linux/psp-sev.h b/include/uapi/linux/psp-sev.h
index 8654b2442f6a..592a0c1b77c9 100644
--- a/include/uapi/linux/psp-sev.h
+++ b/include/uapi/linux/psp-sev.h
@@ -1,4 +1,4 @@
1/* SPDX-License-Identifier: GPL-2.0-only */ 1/* SPDX-License-Identifier: GPL-2.0-only WITH Linux-syscall-note */
2/* 2/*
3 * Userspace interface for AMD Secure Encrypted Virtualization (SEV) 3 * Userspace interface for AMD Secure Encrypted Virtualization (SEV)
4 * platform management commands. 4 * platform management commands.
diff --git a/include/uapi/linux/rds.h b/include/uapi/linux/rds.h
index fd6b5f66e2c5..cba368e55863 100644
--- a/include/uapi/linux/rds.h
+++ b/include/uapi/linux/rds.h
@@ -250,6 +250,7 @@ struct rds_info_rdma_connection {
250 __u32 rdma_mr_max; 250 __u32 rdma_mr_max;
251 __u32 rdma_mr_size; 251 __u32 rdma_mr_size;
252 __u8 tos; 252 __u8 tos;
253 __u8 sl;
253 __u32 cache_allocs; 254 __u32 cache_allocs;
254}; 255};
255 256
@@ -265,6 +266,7 @@ struct rds6_info_rdma_connection {
265 __u32 rdma_mr_max; 266 __u32 rdma_mr_max;
266 __u32 rdma_mr_size; 267 __u32 rdma_mr_size;
267 __u8 tos; 268 __u8 tos;
269 __u8 sl;
268 __u32 cache_allocs; 270 __u32 cache_allocs;
269}; 271};
270 272
diff --git a/include/uapi/linux/rxrpc.h b/include/uapi/linux/rxrpc.h
index 782069dcf607..4accfa7e266d 100644
--- a/include/uapi/linux/rxrpc.h
+++ b/include/uapi/linux/rxrpc.h
@@ -1,4 +1,4 @@
1/* SPDX-License-Identifier: GPL-2.0-or-later */ 1/* SPDX-License-Identifier: GPL-2.0-or-later WITH Linux-syscall-note */
2/* Types and definitions for AF_RXRPC. 2/* Types and definitions for AF_RXRPC.
3 * 3 *
4 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved. 4 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
diff --git a/include/uapi/linux/serial_core.h b/include/uapi/linux/serial_core.h
index 5642c05e0da0..3cc3af1c2ee1 100644
--- a/include/uapi/linux/serial_core.h
+++ b/include/uapi/linux/serial_core.h
@@ -150,9 +150,6 @@
150 150
151#define PORT_PNX8XXX 70 151#define PORT_PNX8XXX 70
152 152
153/* Hilscher netx */
154#define PORT_NETX 71
155
156/* SUN4V Hypervisor Console */ 153/* SUN4V Hypervisor Console */
157#define PORT_SUNHV 72 154#define PORT_SUNHV 72
158 155
diff --git a/include/uapi/linux/socket.h b/include/uapi/linux/socket.h
index 8eb96021709c..c3409c8ec0dd 100644
--- a/include/uapi/linux/socket.h
+++ b/include/uapi/linux/socket.h
@@ -6,17 +6,24 @@
6 * Desired design of maximum size and alignment (see RFC2553) 6 * Desired design of maximum size and alignment (see RFC2553)
7 */ 7 */
8#define _K_SS_MAXSIZE 128 /* Implementation specific max size */ 8#define _K_SS_MAXSIZE 128 /* Implementation specific max size */
9#define _K_SS_ALIGNSIZE (__alignof__ (struct sockaddr *))
10 /* Implementation specific desired alignment */
11 9
12typedef unsigned short __kernel_sa_family_t; 10typedef unsigned short __kernel_sa_family_t;
13 11
12/*
13 * The definition uses anonymous union and struct in order to control the
14 * default alignment.
15 */
14struct __kernel_sockaddr_storage { 16struct __kernel_sockaddr_storage {
15 __kernel_sa_family_t ss_family; /* address family */ 17 union {
16 /* Following field(s) are implementation specific */ 18 struct {
17 char __data[_K_SS_MAXSIZE - sizeof(unsigned short)]; 19 __kernel_sa_family_t ss_family; /* address family */
20 /* Following field(s) are implementation specific */
21 char __data[_K_SS_MAXSIZE - sizeof(unsigned short)];
18 /* space to achieve desired size, */ 22 /* space to achieve desired size, */
19 /* _SS_MAXSIZE value minus size of ss_family */ 23 /* _SS_MAXSIZE value minus size of ss_family */
20} __attribute__ ((aligned(_K_SS_ALIGNSIZE))); /* force desired alignment */ 24 };
25 void *__align; /* implementation specific desired alignment */
26 };
27};
21 28
22#endif /* _UAPI_LINUX_SOCKET_H */ 29#endif /* _UAPI_LINUX_SOCKET_H */
diff --git a/include/uapi/linux/usb/g_uvc.h b/include/uapi/linux/usb/g_uvc.h
index 3c9ee3020cbb..652f169a019e 100644
--- a/include/uapi/linux/usb/g_uvc.h
+++ b/include/uapi/linux/usb/g_uvc.h
@@ -1,4 +1,4 @@
1/* SPDX-License-Identifier: GPL-2.0+ */ 1/* SPDX-License-Identifier: GPL-2.0+ WITH Linux-syscall-note */
2/* 2/*
3 * g_uvc.h -- USB Video Class Gadget driver API 3 * g_uvc.h -- USB Video Class Gadget driver API
4 * 4 *
diff --git a/include/uapi/linux/vbox_vmmdev_types.h b/include/uapi/linux/vbox_vmmdev_types.h
index 26f39816af14..c27289fd619a 100644
--- a/include/uapi/linux/vbox_vmmdev_types.h
+++ b/include/uapi/linux/vbox_vmmdev_types.h
@@ -1,4 +1,4 @@
1/* SPDX-License-Identifier: (GPL-2.0 OR CDDL-1.0) */ 1/* SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR CDDL-1.0) */
2/* 2/*
3 * Virtual Device for Guest <-> VMM/Host communication, type definitions 3 * Virtual Device for Guest <-> VMM/Host communication, type definitions
4 * which are also used for the vboxguest ioctl interface / by vboxsf 4 * which are also used for the vboxguest ioctl interface / by vboxsf
diff --git a/include/uapi/linux/vboxguest.h b/include/uapi/linux/vboxguest.h
index 612f0c7d3558..9cec58a6a5ea 100644
--- a/include/uapi/linux/vboxguest.h
+++ b/include/uapi/linux/vboxguest.h
@@ -1,4 +1,4 @@
1/* SPDX-License-Identifier: (GPL-2.0 OR CDDL-1.0) */ 1/* SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR CDDL-1.0) */
2/* 2/*
3 * VBoxGuest - VirtualBox Guest Additions Driver Interface. 3 * VBoxGuest - VirtualBox Guest Additions Driver Interface.
4 * 4 *
diff --git a/include/uapi/linux/videodev2.h b/include/uapi/linux/videodev2.h
index 9d9705ceda76..2427bc4d8eba 100644
--- a/include/uapi/linux/videodev2.h
+++ b/include/uapi/linux/videodev2.h
@@ -518,7 +518,13 @@ struct v4l2_pix_format {
518#define V4L2_PIX_FMT_RGBX444 v4l2_fourcc('R', 'X', '1', '2') /* 16 rrrrgggg bbbbxxxx */ 518#define V4L2_PIX_FMT_RGBX444 v4l2_fourcc('R', 'X', '1', '2') /* 16 rrrrgggg bbbbxxxx */
519#define V4L2_PIX_FMT_ABGR444 v4l2_fourcc('A', 'B', '1', '2') /* 16 aaaabbbb ggggrrrr */ 519#define V4L2_PIX_FMT_ABGR444 v4l2_fourcc('A', 'B', '1', '2') /* 16 aaaabbbb ggggrrrr */
520#define V4L2_PIX_FMT_XBGR444 v4l2_fourcc('X', 'B', '1', '2') /* 16 xxxxbbbb ggggrrrr */ 520#define V4L2_PIX_FMT_XBGR444 v4l2_fourcc('X', 'B', '1', '2') /* 16 xxxxbbbb ggggrrrr */
521#define V4L2_PIX_FMT_BGRA444 v4l2_fourcc('B', 'A', '1', '2') /* 16 bbbbgggg rrrraaaa */ 521
522/*
523 * Originally this had 'BA12' as fourcc, but this clashed with the older
524 * V4L2_PIX_FMT_SGRBG12 which inexplicably used that same fourcc.
525 * So use 'GA12' instead for V4L2_PIX_FMT_BGRA444.
526 */
527#define V4L2_PIX_FMT_BGRA444 v4l2_fourcc('G', 'A', '1', '2') /* 16 bbbbgggg rrrraaaa */
522#define V4L2_PIX_FMT_BGRX444 v4l2_fourcc('B', 'X', '1', '2') /* 16 bbbbgggg rrrrxxxx */ 528#define V4L2_PIX_FMT_BGRX444 v4l2_fourcc('B', 'X', '1', '2') /* 16 bbbbgggg rrrrxxxx */
523#define V4L2_PIX_FMT_RGB555 v4l2_fourcc('R', 'G', 'B', 'O') /* 16 RGB-5-5-5 */ 529#define V4L2_PIX_FMT_RGB555 v4l2_fourcc('R', 'G', 'B', 'O') /* 16 RGB-5-5-5 */
524#define V4L2_PIX_FMT_ARGB555 v4l2_fourcc('A', 'R', '1', '5') /* 16 ARGB-1-5-5-5 */ 530#define V4L2_PIX_FMT_ARGB555 v4l2_fourcc('A', 'R', '1', '5') /* 16 ARGB-1-5-5-5 */
diff --git a/include/uapi/linux/virtio_iommu.h b/include/uapi/linux/virtio_iommu.h
index ba1b460c9944..237e36a280cb 100644
--- a/include/uapi/linux/virtio_iommu.h
+++ b/include/uapi/linux/virtio_iommu.h
@@ -1,8 +1,8 @@
1/* SPDX-License-Identifier: BSD-3-Clause */ 1/* SPDX-License-Identifier: BSD-3-Clause */
2/* 2/*
3 * Virtio-iommu definition v0.9 3 * Virtio-iommu definition v0.12
4 * 4 *
5 * Copyright (C) 2018 Arm Ltd. 5 * Copyright (C) 2019 Arm Ltd.
6 */ 6 */
7#ifndef _UAPI_LINUX_VIRTIO_IOMMU_H 7#ifndef _UAPI_LINUX_VIRTIO_IOMMU_H
8#define _UAPI_LINUX_VIRTIO_IOMMU_H 8#define _UAPI_LINUX_VIRTIO_IOMMU_H
@@ -11,26 +11,31 @@
11 11
12/* Feature bits */ 12/* Feature bits */
13#define VIRTIO_IOMMU_F_INPUT_RANGE 0 13#define VIRTIO_IOMMU_F_INPUT_RANGE 0
14#define VIRTIO_IOMMU_F_DOMAIN_BITS 1 14#define VIRTIO_IOMMU_F_DOMAIN_RANGE 1
15#define VIRTIO_IOMMU_F_MAP_UNMAP 2 15#define VIRTIO_IOMMU_F_MAP_UNMAP 2
16#define VIRTIO_IOMMU_F_BYPASS 3 16#define VIRTIO_IOMMU_F_BYPASS 3
17#define VIRTIO_IOMMU_F_PROBE 4 17#define VIRTIO_IOMMU_F_PROBE 4
18#define VIRTIO_IOMMU_F_MMIO 5
18 19
19struct virtio_iommu_range { 20struct virtio_iommu_range_64 {
20 __u64 start; 21 __le64 start;
21 __u64 end; 22 __le64 end;
23};
24
25struct virtio_iommu_range_32 {
26 __le32 start;
27 __le32 end;
22}; 28};
23 29
24struct virtio_iommu_config { 30struct virtio_iommu_config {
25 /* Supported page sizes */ 31 /* Supported page sizes */
26 __u64 page_size_mask; 32 __le64 page_size_mask;
27 /* Supported IOVA range */ 33 /* Supported IOVA range */
28 struct virtio_iommu_range input_range; 34 struct virtio_iommu_range_64 input_range;
29 /* Max domain ID size */ 35 /* Max domain ID size */
30 __u8 domain_bits; 36 struct virtio_iommu_range_32 domain_range;
31 __u8 padding[3];
32 /* Probe buffer size */ 37 /* Probe buffer size */
33 __u32 probe_size; 38 __le32 probe_size;
34}; 39};
35 40
36/* Request types */ 41/* Request types */
@@ -49,6 +54,7 @@ struct virtio_iommu_config {
49#define VIRTIO_IOMMU_S_RANGE 0x05 54#define VIRTIO_IOMMU_S_RANGE 0x05
50#define VIRTIO_IOMMU_S_NOENT 0x06 55#define VIRTIO_IOMMU_S_NOENT 0x06
51#define VIRTIO_IOMMU_S_FAULT 0x07 56#define VIRTIO_IOMMU_S_FAULT 0x07
57#define VIRTIO_IOMMU_S_NOMEM 0x08
52 58
53struct virtio_iommu_req_head { 59struct virtio_iommu_req_head {
54 __u8 type; 60 __u8 type;
@@ -78,12 +84,10 @@ struct virtio_iommu_req_detach {
78 84
79#define VIRTIO_IOMMU_MAP_F_READ (1 << 0) 85#define VIRTIO_IOMMU_MAP_F_READ (1 << 0)
80#define VIRTIO_IOMMU_MAP_F_WRITE (1 << 1) 86#define VIRTIO_IOMMU_MAP_F_WRITE (1 << 1)
81#define VIRTIO_IOMMU_MAP_F_EXEC (1 << 2) 87#define VIRTIO_IOMMU_MAP_F_MMIO (1 << 2)
82#define VIRTIO_IOMMU_MAP_F_MMIO (1 << 3)
83 88
84#define VIRTIO_IOMMU_MAP_F_MASK (VIRTIO_IOMMU_MAP_F_READ | \ 89#define VIRTIO_IOMMU_MAP_F_MASK (VIRTIO_IOMMU_MAP_F_READ | \
85 VIRTIO_IOMMU_MAP_F_WRITE | \ 90 VIRTIO_IOMMU_MAP_F_WRITE | \
86 VIRTIO_IOMMU_MAP_F_EXEC | \
87 VIRTIO_IOMMU_MAP_F_MMIO) 91 VIRTIO_IOMMU_MAP_F_MMIO)
88 92
89struct virtio_iommu_req_map { 93struct virtio_iommu_req_map {
diff --git a/include/uapi/linux/virtio_pmem.h b/include/uapi/linux/virtio_pmem.h
index 9a63ed6d062f..b022787ffb94 100644
--- a/include/uapi/linux/virtio_pmem.h
+++ b/include/uapi/linux/virtio_pmem.h
@@ -1,4 +1,4 @@
1/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ 1/* SPDX-License-Identifier: (GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause */
2/* 2/*
3 * Definitions for virtio-pmem devices. 3 * Definitions for virtio-pmem devices.
4 * 4 *
diff --git a/include/uapi/linux/vmcore.h b/include/uapi/linux/vmcore.h
index 022619668e0e..3e9da91866ff 100644
--- a/include/uapi/linux/vmcore.h
+++ b/include/uapi/linux/vmcore.h
@@ -1,4 +1,4 @@
1/* SPDX-License-Identifier: GPL-2.0 */ 1/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
2#ifndef _UAPI_VMCORE_H 2#ifndef _UAPI_VMCORE_H
3#define _UAPI_VMCORE_H 3#define _UAPI_VMCORE_H
4 4
diff --git a/include/uapi/linux/wmi.h b/include/uapi/linux/wmi.h
index c36f2d7675a4..7085c5dca9fa 100644
--- a/include/uapi/linux/wmi.h
+++ b/include/uapi/linux/wmi.h
@@ -1,4 +1,4 @@
1/* SPDX-License-Identifier: GPL-2.0-only */ 1/* SPDX-License-Identifier: GPL-2.0-only WITH Linux-syscall-note */
2/* 2/*
3 * User API methods for ACPI-WMI mapping driver 3 * User API methods for ACPI-WMI mapping driver
4 * 4 *
diff --git a/include/uapi/misc/fastrpc.h b/include/uapi/misc/fastrpc.h
index 6d701af9fc42..fb792e882cef 100644
--- a/include/uapi/misc/fastrpc.h
+++ b/include/uapi/misc/fastrpc.h
@@ -1,4 +1,4 @@
1/* SPDX-License-Identifier: GPL-2.0 */ 1/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
2 2
3#ifndef __QCOM_FASTRPC_H__ 3#ifndef __QCOM_FASTRPC_H__
4#define __QCOM_FASTRPC_H__ 4#define __QCOM_FASTRPC_H__
diff --git a/include/uapi/rdma/rvt-abi.h b/include/uapi/rdma/rvt-abi.h
index 7328293c715c..7c05a02d2be5 100644
--- a/include/uapi/rdma/rvt-abi.h
+++ b/include/uapi/rdma/rvt-abi.h
@@ -1,4 +1,4 @@
1/* SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause) */ 1/* SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause) */
2 2
3/* 3/*
4 * This file contains defines, structures, etc. that are used 4 * This file contains defines, structures, etc. that are used
diff --git a/include/uapi/rdma/siw-abi.h b/include/uapi/rdma/siw-abi.h
index 3dd8071ace7b..af735f55b291 100644
--- a/include/uapi/rdma/siw-abi.h
+++ b/include/uapi/rdma/siw-abi.h
@@ -1,4 +1,4 @@
1/* SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause */ 1/* SPDX-License-Identifier: (GPL-2.0 WITH Linux-syscall-note) or BSD-3-Clause */
2 2
3/* Authors: Bernard Metzler <bmt@zurich.ibm.com> */ 3/* Authors: Bernard Metzler <bmt@zurich.ibm.com> */
4/* Copyright (c) 2008-2019, IBM Corporation */ 4/* Copyright (c) 2008-2019, IBM Corporation */
@@ -180,6 +180,7 @@ struct siw_cqe {
180 * to control CQ arming. 180 * to control CQ arming.
181 */ 181 */
182struct siw_cq_ctrl { 182struct siw_cq_ctrl {
183 __aligned_u64 notify; 183 __u32 flags;
184 __u32 pad;
184}; 185};
185#endif 186#endif
diff --git a/include/uapi/scsi/scsi_bsg_ufs.h b/include/uapi/scsi/scsi_bsg_ufs.h
index 17c7abd0803a..9988db6ad244 100644
--- a/include/uapi/scsi/scsi_bsg_ufs.h
+++ b/include/uapi/scsi/scsi_bsg_ufs.h
@@ -1,4 +1,4 @@
1/* SPDX-License-Identifier: GPL-2.0 */ 1/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
2/* 2/*
3 * UFS Transport SGIO v4 BSG Message Support 3 * UFS Transport SGIO v4 BSG Message Support
4 * 4 *
diff --git a/include/uapi/sound/skl-tplg-interface.h b/include/uapi/sound/skl-tplg-interface.h
index f39352cef382..9eee32f5e407 100644
--- a/include/uapi/sound/skl-tplg-interface.h
+++ b/include/uapi/sound/skl-tplg-interface.h
@@ -1,4 +1,4 @@
1/* SPDX-License-Identifier: GPL-2.0 */ 1/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
2/* 2/*
3 * skl-tplg-interface.h - Intel DSP FW private data interface 3 * skl-tplg-interface.h - Intel DSP FW private data interface
4 * 4 *
diff --git a/include/uapi/sound/sof/fw.h b/include/uapi/sound/sof/fw.h
index 1afca973eb09..e9f697467a86 100644
--- a/include/uapi/sound/sof/fw.h
+++ b/include/uapi/sound/sof/fw.h
@@ -13,6 +13,8 @@
13#ifndef __INCLUDE_UAPI_SOF_FW_H__ 13#ifndef __INCLUDE_UAPI_SOF_FW_H__
14#define __INCLUDE_UAPI_SOF_FW_H__ 14#define __INCLUDE_UAPI_SOF_FW_H__
15 15
16#include <linux/types.h>
17
16#define SND_SOF_FW_SIG_SIZE 4 18#define SND_SOF_FW_SIG_SIZE 4
17#define SND_SOF_FW_ABI 1 19#define SND_SOF_FW_ABI 1
18#define SND_SOF_FW_SIG "Reef" 20#define SND_SOF_FW_SIG "Reef"
@@ -46,8 +48,8 @@ enum snd_sof_fw_blk_type {
46 48
47struct snd_sof_blk_hdr { 49struct snd_sof_blk_hdr {
48 enum snd_sof_fw_blk_type type; 50 enum snd_sof_fw_blk_type type;
49 uint32_t size; /* bytes minus this header */ 51 __u32 size; /* bytes minus this header */
50 uint32_t offset; /* offset from base */ 52 __u32 offset; /* offset from base */
51} __packed; 53} __packed;
52 54
53/* 55/*
@@ -61,8 +63,8 @@ enum snd_sof_fw_mod_type {
61 63
62struct snd_sof_mod_hdr { 64struct snd_sof_mod_hdr {
63 enum snd_sof_fw_mod_type type; 65 enum snd_sof_fw_mod_type type;
64 uint32_t size; /* bytes minus this header */ 66 __u32 size; /* bytes minus this header */
65 uint32_t num_blocks; /* number of blocks */ 67 __u32 num_blocks; /* number of blocks */
66} __packed; 68} __packed;
67 69
68/* 70/*
@@ -70,9 +72,9 @@ struct snd_sof_mod_hdr {
70 */ 72 */
71struct snd_sof_fw_header { 73struct snd_sof_fw_header {
72 unsigned char sig[SND_SOF_FW_SIG_SIZE]; /* "Reef" */ 74 unsigned char sig[SND_SOF_FW_SIG_SIZE]; /* "Reef" */
73 uint32_t file_size; /* size of file minus this header */ 75 __u32 file_size; /* size of file minus this header */
74 uint32_t num_modules; /* number of modules */ 76 __u32 num_modules; /* number of modules */
75 uint32_t abi; /* version of header format */ 77 __u32 abi; /* version of header format */
76} __packed; 78} __packed;
77 79
78#endif 80#endif
diff --git a/include/uapi/sound/sof/header.h b/include/uapi/sound/sof/header.h
index 7868990b0d6f..5f4518e7a972 100644
--- a/include/uapi/sound/sof/header.h
+++ b/include/uapi/sound/sof/header.h
@@ -9,6 +9,8 @@
9#ifndef __INCLUDE_UAPI_SOUND_SOF_USER_HEADER_H__ 9#ifndef __INCLUDE_UAPI_SOUND_SOF_USER_HEADER_H__
10#define __INCLUDE_UAPI_SOUND_SOF_USER_HEADER_H__ 10#define __INCLUDE_UAPI_SOUND_SOF_USER_HEADER_H__
11 11
12#include <linux/types.h>
13
12/* 14/*
13 * Header for all non IPC ABI data. 15 * Header for all non IPC ABI data.
14 * 16 *
@@ -16,12 +18,12 @@
16 * Used by any bespoke component data structures or binary blobs. 18 * Used by any bespoke component data structures or binary blobs.
17 */ 19 */
18struct sof_abi_hdr { 20struct sof_abi_hdr {
19 uint32_t magic; /**< 'S', 'O', 'F', '\0' */ 21 __u32 magic; /**< 'S', 'O', 'F', '\0' */
20 uint32_t type; /**< component specific type */ 22 __u32 type; /**< component specific type */
21 uint32_t size; /**< size in bytes of data excl. this struct */ 23 __u32 size; /**< size in bytes of data excl. this struct */
22 uint32_t abi; /**< SOF ABI version */ 24 __u32 abi; /**< SOF ABI version */
23 uint32_t reserved[4]; /**< reserved for future use */ 25 __u32 reserved[4]; /**< reserved for future use */
24 uint32_t data[0]; /**< Component data - opaque to core */ 26 __u32 data[0]; /**< Component data - opaque to core */
25} __packed; 27} __packed;
26 28
27#endif 29#endif
diff --git a/include/xen/xen-ops.h b/include/xen/xen-ops.h
index 4969817124a8..98b30c1613b2 100644
--- a/include/xen/xen-ops.h
+++ b/include/xen/xen-ops.h
@@ -109,6 +109,9 @@ static inline int xen_xlate_unmap_gfn_range(struct vm_area_struct *vma,
109} 109}
110#endif 110#endif
111 111
112int xen_remap_vma_range(struct vm_area_struct *vma, unsigned long addr,
113 unsigned long len);
114
112/* 115/*
113 * xen_remap_domain_gfn_array() - map an array of foreign frames by gfn 116 * xen_remap_domain_gfn_array() - map an array of foreign frames by gfn
114 * @vma: VMA to map the pages into 117 * @vma: VMA to map the pages into
diff --git a/kernel/Kconfig.preempt b/kernel/Kconfig.preempt
index fc020c09b7e8..deff97217496 100644
--- a/kernel/Kconfig.preempt
+++ b/kernel/Kconfig.preempt
@@ -35,10 +35,10 @@ config PREEMPT_VOLUNTARY
35 35
36 Select this if you are building a kernel for a desktop system. 36 Select this if you are building a kernel for a desktop system.
37 37
38config PREEMPT_LL 38config PREEMPT
39 bool "Preemptible Kernel (Low-Latency Desktop)" 39 bool "Preemptible Kernel (Low-Latency Desktop)"
40 depends on !ARCH_NO_PREEMPT 40 depends on !ARCH_NO_PREEMPT
41 select PREEMPT 41 select PREEMPTION
42 select UNINLINE_SPIN_UNLOCK if !ARCH_INLINE_SPIN_UNLOCK 42 select UNINLINE_SPIN_UNLOCK if !ARCH_INLINE_SPIN_UNLOCK
43 help 43 help
44 This option reduces the latency of the kernel by making 44 This option reduces the latency of the kernel by making
@@ -58,7 +58,7 @@ config PREEMPT_LL
58config PREEMPT_RT 58config PREEMPT_RT
59 bool "Fully Preemptible Kernel (Real-Time)" 59 bool "Fully Preemptible Kernel (Real-Time)"
60 depends on EXPERT && ARCH_SUPPORTS_RT 60 depends on EXPERT && ARCH_SUPPORTS_RT
61 select PREEMPT 61 select PREEMPTION
62 help 62 help
63 This option turns the kernel into a real-time kernel by replacing 63 This option turns the kernel into a real-time kernel by replacing
64 various locking primitives (spinlocks, rwlocks, etc.) with 64 various locking primitives (spinlocks, rwlocks, etc.) with
@@ -77,6 +77,6 @@ endchoice
77config PREEMPT_COUNT 77config PREEMPT_COUNT
78 bool 78 bool
79 79
80config PREEMPT 80config PREEMPTION
81 bool 81 bool
82 select PREEMPT_COUNT 82 select PREEMPT_COUNT
diff --git a/kernel/Makefile b/kernel/Makefile
index a8d923b5481b..ef0d95a190b4 100644
--- a/kernel/Makefile
+++ b/kernel/Makefile
@@ -111,7 +111,6 @@ obj-$(CONFIG_CONTEXT_TRACKING) += context_tracking.o
111obj-$(CONFIG_TORTURE_TEST) += torture.o 111obj-$(CONFIG_TORTURE_TEST) += torture.o
112 112
113obj-$(CONFIG_HAS_IOMEM) += iomem.o 113obj-$(CONFIG_HAS_IOMEM) += iomem.o
114obj-$(CONFIG_ZONE_DEVICE) += memremap.o
115obj-$(CONFIG_RSEQ) += rseq.o 114obj-$(CONFIG_RSEQ) += rseq.o
116 115
117obj-$(CONFIG_GCC_PLUGIN_STACKLEAK) += stackleak.o 116obj-$(CONFIG_GCC_PLUGIN_STACKLEAK) += stackleak.o
diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c
index 8191a7db2777..66088a9e9b9e 100644
--- a/kernel/bpf/core.c
+++ b/kernel/bpf/core.c
@@ -890,7 +890,8 @@ int bpf_jit_get_func_addr(const struct bpf_prog *prog,
890 890
891static int bpf_jit_blind_insn(const struct bpf_insn *from, 891static int bpf_jit_blind_insn(const struct bpf_insn *from,
892 const struct bpf_insn *aux, 892 const struct bpf_insn *aux,
893 struct bpf_insn *to_buff) 893 struct bpf_insn *to_buff,
894 bool emit_zext)
894{ 895{
895 struct bpf_insn *to = to_buff; 896 struct bpf_insn *to = to_buff;
896 u32 imm_rnd = get_random_int(); 897 u32 imm_rnd = get_random_int();
@@ -1005,6 +1006,8 @@ static int bpf_jit_blind_insn(const struct bpf_insn *from,
1005 case 0: /* Part 2 of BPF_LD | BPF_IMM | BPF_DW. */ 1006 case 0: /* Part 2 of BPF_LD | BPF_IMM | BPF_DW. */
1006 *to++ = BPF_ALU32_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ aux[0].imm); 1007 *to++ = BPF_ALU32_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ aux[0].imm);
1007 *to++ = BPF_ALU32_IMM(BPF_XOR, BPF_REG_AX, imm_rnd); 1008 *to++ = BPF_ALU32_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
1009 if (emit_zext)
1010 *to++ = BPF_ZEXT_REG(BPF_REG_AX);
1008 *to++ = BPF_ALU64_REG(BPF_OR, aux[0].dst_reg, BPF_REG_AX); 1011 *to++ = BPF_ALU64_REG(BPF_OR, aux[0].dst_reg, BPF_REG_AX);
1009 break; 1012 break;
1010 1013
@@ -1088,7 +1091,8 @@ struct bpf_prog *bpf_jit_blind_constants(struct bpf_prog *prog)
1088 insn[1].code == 0) 1091 insn[1].code == 0)
1089 memcpy(aux, insn, sizeof(aux)); 1092 memcpy(aux, insn, sizeof(aux));
1090 1093
1091 rewritten = bpf_jit_blind_insn(insn, aux, insn_buff); 1094 rewritten = bpf_jit_blind_insn(insn, aux, insn_buff,
1095 clone->aux->verifier_zext);
1092 if (!rewritten) 1096 if (!rewritten)
1093 continue; 1097 continue;
1094 1098
diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
index 5d141f16f6fa..272071e9112f 100644
--- a/kernel/bpf/syscall.c
+++ b/kernel/bpf/syscall.c
@@ -1707,20 +1707,26 @@ static int bpf_prog_load(union bpf_attr *attr, union bpf_attr __user *uattr)
1707 if (err) 1707 if (err)
1708 goto free_used_maps; 1708 goto free_used_maps;
1709 1709
1710 err = bpf_prog_new_fd(prog); 1710 /* Upon success of bpf_prog_alloc_id(), the BPF prog is
1711 if (err < 0) { 1711 * effectively publicly exposed. However, retrieving via
1712 /* failed to allocate fd. 1712 * bpf_prog_get_fd_by_id() will take another reference,
1713 * bpf_prog_put() is needed because the above 1713 * therefore it cannot be gone underneath us.
1714 * bpf_prog_alloc_id() has published the prog 1714 *
1715 * to the userspace and the userspace may 1715 * Only for the time /after/ successful bpf_prog_new_fd()
1716 * have refcnt-ed it through BPF_PROG_GET_FD_BY_ID. 1716 * and before returning to userspace, we might just hold
1717 */ 1717 * one reference and any parallel close on that fd could
1718 bpf_prog_put(prog); 1718 * rip everything out. Hence, below notifications must
1719 return err; 1719 * happen before bpf_prog_new_fd().
1720 } 1720 *
1721 1721 * Also, any failure handling from this point onwards must
1722 * be using bpf_prog_put() given the program is exposed.
1723 */
1722 bpf_prog_kallsyms_add(prog); 1724 bpf_prog_kallsyms_add(prog);
1723 perf_event_bpf_event(prog, PERF_BPF_EVENT_PROG_LOAD, 0); 1725 perf_event_bpf_event(prog, PERF_BPF_EVENT_PROG_LOAD, 0);
1726
1727 err = bpf_prog_new_fd(prog);
1728 if (err < 0)
1729 bpf_prog_put(prog);
1724 return err; 1730 return err;
1725 1731
1726free_used_maps: 1732free_used_maps:
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index 5900cbb966b1..b5c14c9d7b98 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -985,9 +985,6 @@ static void __mark_reg_unbounded(struct bpf_reg_state *reg)
985 reg->smax_value = S64_MAX; 985 reg->smax_value = S64_MAX;
986 reg->umin_value = 0; 986 reg->umin_value = 0;
987 reg->umax_value = U64_MAX; 987 reg->umax_value = U64_MAX;
988
989 /* constant backtracking is enabled for root only for now */
990 reg->precise = capable(CAP_SYS_ADMIN) ? false : true;
991} 988}
992 989
993/* Mark a register as having a completely unknown (scalar) value. */ 990/* Mark a register as having a completely unknown (scalar) value. */
@@ -1014,7 +1011,11 @@ static void mark_reg_unknown(struct bpf_verifier_env *env,
1014 __mark_reg_not_init(regs + regno); 1011 __mark_reg_not_init(regs + regno);
1015 return; 1012 return;
1016 } 1013 }
1017 __mark_reg_unknown(regs + regno); 1014 regs += regno;
1015 __mark_reg_unknown(regs);
1016 /* constant backtracking is enabled for root without bpf2bpf calls */
1017 regs->precise = env->subprog_cnt > 1 || !env->allow_ptr_leaks ?
1018 true : false;
1018} 1019}
1019 1020
1020static void __mark_reg_not_init(struct bpf_reg_state *reg) 1021static void __mark_reg_not_init(struct bpf_reg_state *reg)
@@ -8616,8 +8617,8 @@ static int convert_ctx_accesses(struct bpf_verifier_env *env)
8616 } 8617 }
8617 8618
8618 if (is_narrower_load && size < target_size) { 8619 if (is_narrower_load && size < target_size) {
8619 u8 shift = (off & (size_default - 1)) * 8; 8620 u8 shift = bpf_ctx_narrow_load_shift(off, size,
8620 8621 size_default);
8621 if (ctx_field_size <= 4) { 8622 if (ctx_field_size <= 4) {
8622 if (shift) 8623 if (shift)
8623 insn_buf[cnt++] = BPF_ALU32_IMM(BPF_RSH, 8624 insn_buf[cnt++] = BPF_ALU32_IMM(BPF_RSH,
diff --git a/kernel/configs.c b/kernel/configs.c
index b062425ccf8d..c09ea4c995e1 100644
--- a/kernel/configs.c
+++ b/kernel/configs.c
@@ -1,3 +1,4 @@
1// SPDX-License-Identifier: GPL-2.0-or-later
1/* 2/*
2 * kernel/configs.c 3 * kernel/configs.c
3 * Echo the kernel .config file used to build the kernel 4 * Echo the kernel .config file used to build the kernel
@@ -6,21 +7,6 @@
6 * Copyright (C) 2002 Randy Dunlap <rdunlap@xenotime.net> 7 * Copyright (C) 2002 Randy Dunlap <rdunlap@xenotime.net>
7 * Copyright (C) 2002 Al Stone <ahs3@fc.hp.com> 8 * Copyright (C) 2002 Al Stone <ahs3@fc.hp.com>
8 * Copyright (C) 2002 Hewlett-Packard Company 9 * Copyright (C) 2002 Hewlett-Packard Company
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License as published by
12 * the Free Software Foundation; either version 2 of the License, or (at
13 * your option) any later version.
14 *
15 * This program is distributed in the hope that it will be useful, but
16 * WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
18 * NON INFRINGEMENT. See the GNU General Public License for more
19 * details.
20 *
21 * You should have received a copy of the GNU General Public License
22 * along with this program; if not, write to the Free Software
23 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
24 */ 10 */
25 11
26#include <linux/kernel.h> 12#include <linux/kernel.h>
diff --git a/kernel/cred.c b/kernel/cred.c
index f9a0ce66c9c3..c0a4c12d38b2 100644
--- a/kernel/cred.c
+++ b/kernel/cred.c
@@ -144,7 +144,10 @@ void __put_cred(struct cred *cred)
144 BUG_ON(cred == current->cred); 144 BUG_ON(cred == current->cred);
145 BUG_ON(cred == current->real_cred); 145 BUG_ON(cred == current->real_cred);
146 146
147 call_rcu(&cred->rcu, put_cred_rcu); 147 if (cred->non_rcu)
148 put_cred_rcu(&cred->rcu);
149 else
150 call_rcu(&cred->rcu, put_cred_rcu);
148} 151}
149EXPORT_SYMBOL(__put_cred); 152EXPORT_SYMBOL(__put_cred);
150 153
@@ -261,6 +264,7 @@ struct cred *prepare_creds(void)
261 old = task->cred; 264 old = task->cred;
262 memcpy(new, old, sizeof(struct cred)); 265 memcpy(new, old, sizeof(struct cred));
263 266
267 new->non_rcu = 0;
264 atomic_set(&new->usage, 1); 268 atomic_set(&new->usage, 1);
265 set_cred_subscribers(new, 0); 269 set_cred_subscribers(new, 0);
266 get_group_info(new->group_info); 270 get_group_info(new->group_info);
@@ -544,7 +548,19 @@ const struct cred *override_creds(const struct cred *new)
544 548
545 validate_creds(old); 549 validate_creds(old);
546 validate_creds(new); 550 validate_creds(new);
547 get_cred(new); 551
552 /*
553 * NOTE! This uses 'get_new_cred()' rather than 'get_cred()'.
554 *
555 * That means that we do not clear the 'non_rcu' flag, since
556 * we are only installing the cred into the thread-synchronous
557 * '->cred' pointer, not the '->real_cred' pointer that is
558 * visible to other threads under RCU.
559 *
560 * Also note that we did validate_creds() manually, not depending
561 * on the validation in 'get_cred()'.
562 */
563 get_new_cred((struct cred *)new);
548 alter_cred_subscribers(new, 1); 564 alter_cred_subscribers(new, 1);
549 rcu_assign_pointer(current->cred, new); 565 rcu_assign_pointer(current->cred, new);
550 alter_cred_subscribers(old, -1); 566 alter_cred_subscribers(old, -1);
@@ -681,6 +697,7 @@ struct cred *prepare_kernel_cred(struct task_struct *daemon)
681 validate_creds(old); 697 validate_creds(old);
682 698
683 *new = *old; 699 *new = *old;
700 new->non_rcu = 0;
684 atomic_set(&new->usage, 1); 701 atomic_set(&new->usage, 1);
685 set_cred_subscribers(new, 0); 702 set_cred_subscribers(new, 0);
686 get_uid(new->user); 703 get_uid(new->user);
diff --git a/kernel/dma/contiguous.c b/kernel/dma/contiguous.c
index bfc0c17f2a3d..69cfb4345388 100644
--- a/kernel/dma/contiguous.c
+++ b/kernel/dma/contiguous.c
@@ -230,9 +230,7 @@ bool dma_release_from_contiguous(struct device *dev, struct page *pages,
230 */ 230 */
231struct page *dma_alloc_contiguous(struct device *dev, size_t size, gfp_t gfp) 231struct page *dma_alloc_contiguous(struct device *dev, size_t size, gfp_t gfp)
232{ 232{
233 int node = dev ? dev_to_node(dev) : NUMA_NO_NODE; 233 size_t count = size >> PAGE_SHIFT;
234 size_t count = PAGE_ALIGN(size) >> PAGE_SHIFT;
235 size_t align = get_order(PAGE_ALIGN(size));
236 struct page *page = NULL; 234 struct page *page = NULL;
237 struct cma *cma = NULL; 235 struct cma *cma = NULL;
238 236
@@ -243,13 +241,12 @@ struct page *dma_alloc_contiguous(struct device *dev, size_t size, gfp_t gfp)
243 241
244 /* CMA can be used only in the context which permits sleeping */ 242 /* CMA can be used only in the context which permits sleeping */
245 if (cma && gfpflags_allow_blocking(gfp)) { 243 if (cma && gfpflags_allow_blocking(gfp)) {
246 align = min_t(size_t, align, CONFIG_CMA_ALIGNMENT); 244 size_t align = get_order(size);
247 page = cma_alloc(cma, count, align, gfp & __GFP_NOWARN); 245 size_t cma_align = min_t(size_t, align, CONFIG_CMA_ALIGNMENT);
246
247 page = cma_alloc(cma, count, cma_align, gfp & __GFP_NOWARN);
248 } 248 }
249 249
250 /* Fallback allocation of normal pages */
251 if (!page)
252 page = alloc_pages_node(node, gfp, align);
253 return page; 250 return page;
254} 251}
255 252
@@ -266,7 +263,8 @@ struct page *dma_alloc_contiguous(struct device *dev, size_t size, gfp_t gfp)
266 */ 263 */
267void dma_free_contiguous(struct device *dev, struct page *page, size_t size) 264void dma_free_contiguous(struct device *dev, struct page *page, size_t size)
268{ 265{
269 if (!cma_release(dev_get_cma_area(dev), page, size >> PAGE_SHIFT)) 266 if (!cma_release(dev_get_cma_area(dev), page,
267 PAGE_ALIGN(size) >> PAGE_SHIFT))
270 __free_pages(page, get_order(size)); 268 __free_pages(page, get_order(size));
271} 269}
272 270
diff --git a/kernel/dma/direct.c b/kernel/dma/direct.c
index 59bdceea3737..706113c6bebc 100644
--- a/kernel/dma/direct.c
+++ b/kernel/dma/direct.c
@@ -47,9 +47,6 @@ u64 dma_direct_get_required_mask(struct device *dev)
47{ 47{
48 u64 max_dma = phys_to_dma_direct(dev, (max_pfn - 1) << PAGE_SHIFT); 48 u64 max_dma = phys_to_dma_direct(dev, (max_pfn - 1) << PAGE_SHIFT);
49 49
50 if (dev->bus_dma_mask && dev->bus_dma_mask < max_dma)
51 max_dma = dev->bus_dma_mask;
52
53 return (1ULL << (fls64(max_dma) - 1)) * 2 - 1; 50 return (1ULL << (fls64(max_dma) - 1)) * 2 - 1;
54} 51}
55 52
@@ -88,6 +85,8 @@ static bool dma_coherent_ok(struct device *dev, phys_addr_t phys, size_t size)
88struct page *__dma_direct_alloc_pages(struct device *dev, size_t size, 85struct page *__dma_direct_alloc_pages(struct device *dev, size_t size,
89 dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs) 86 dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
90{ 87{
88 size_t alloc_size = PAGE_ALIGN(size);
89 int node = dev_to_node(dev);
91 struct page *page = NULL; 90 struct page *page = NULL;
92 u64 phys_mask; 91 u64 phys_mask;
93 92
@@ -98,8 +97,14 @@ struct page *__dma_direct_alloc_pages(struct device *dev, size_t size,
98 gfp &= ~__GFP_ZERO; 97 gfp &= ~__GFP_ZERO;
99 gfp |= __dma_direct_optimal_gfp_mask(dev, dev->coherent_dma_mask, 98 gfp |= __dma_direct_optimal_gfp_mask(dev, dev->coherent_dma_mask,
100 &phys_mask); 99 &phys_mask);
100 page = dma_alloc_contiguous(dev, alloc_size, gfp);
101 if (page && !dma_coherent_ok(dev, page_to_phys(page), size)) {
102 dma_free_contiguous(dev, page, alloc_size);
103 page = NULL;
104 }
101again: 105again:
102 page = dma_alloc_contiguous(dev, size, gfp); 106 if (!page)
107 page = alloc_pages_node(node, gfp, get_order(alloc_size));
103 if (page && !dma_coherent_ok(dev, page_to_phys(page), size)) { 108 if (page && !dma_coherent_ok(dev, page_to_phys(page), size)) {
104 dma_free_contiguous(dev, page, size); 109 dma_free_contiguous(dev, page, size);
105 page = NULL; 110 page = NULL;
@@ -130,10 +135,12 @@ void *dma_direct_alloc_pages(struct device *dev, size_t size,
130 if (!page) 135 if (!page)
131 return NULL; 136 return NULL;
132 137
133 if (attrs & DMA_ATTR_NO_KERNEL_MAPPING) { 138 if ((attrs & DMA_ATTR_NO_KERNEL_MAPPING) &&
139 !force_dma_unencrypted(dev)) {
134 /* remove any dirty cache lines on the kernel alias */ 140 /* remove any dirty cache lines on the kernel alias */
135 if (!PageHighMem(page)) 141 if (!PageHighMem(page))
136 arch_dma_prep_coherent(page, size); 142 arch_dma_prep_coherent(page, size);
143 *dma_handle = phys_to_dma(dev, page_to_phys(page));
137 /* return the page pointer as the opaque cookie */ 144 /* return the page pointer as the opaque cookie */
138 return page; 145 return page;
139 } 146 }
@@ -178,7 +185,8 @@ void dma_direct_free_pages(struct device *dev, size_t size, void *cpu_addr,
178{ 185{
179 unsigned int page_order = get_order(size); 186 unsigned int page_order = get_order(size);
180 187
181 if (attrs & DMA_ATTR_NO_KERNEL_MAPPING) { 188 if ((attrs & DMA_ATTR_NO_KERNEL_MAPPING) &&
189 !force_dma_unencrypted(dev)) {
182 /* cpu_addr is a struct page cookie, not a kernel address */ 190 /* cpu_addr is a struct page cookie, not a kernel address */
183 __dma_direct_free_pages(dev, size, cpu_addr); 191 __dma_direct_free_pages(dev, size, cpu_addr);
184 return; 192 return;
diff --git a/kernel/dma/mapping.c b/kernel/dma/mapping.c
index 1f628e7ac709..b0038ca3aa92 100644
--- a/kernel/dma/mapping.c
+++ b/kernel/dma/mapping.c
@@ -116,11 +116,16 @@ int dma_common_get_sgtable(struct device *dev, struct sg_table *sgt,
116 int ret; 116 int ret;
117 117
118 if (!dev_is_dma_coherent(dev)) { 118 if (!dev_is_dma_coherent(dev)) {
119 unsigned long pfn;
120
119 if (!IS_ENABLED(CONFIG_ARCH_HAS_DMA_COHERENT_TO_PFN)) 121 if (!IS_ENABLED(CONFIG_ARCH_HAS_DMA_COHERENT_TO_PFN))
120 return -ENXIO; 122 return -ENXIO;
121 123
122 page = pfn_to_page(arch_dma_coherent_to_pfn(dev, cpu_addr, 124 /* If the PFN is not valid, we do not have a struct page */
123 dma_addr)); 125 pfn = arch_dma_coherent_to_pfn(dev, cpu_addr, dma_addr);
126 if (!pfn_valid(pfn))
127 return -ENXIO;
128 page = pfn_to_page(pfn);
124 } else { 129 } else {
125 page = virt_to_page(cpu_addr); 130 page = virt_to_page(cpu_addr);
126 } 131 }
@@ -145,6 +150,23 @@ int dma_get_sgtable_attrs(struct device *dev, struct sg_table *sgt,
145} 150}
146EXPORT_SYMBOL(dma_get_sgtable_attrs); 151EXPORT_SYMBOL(dma_get_sgtable_attrs);
147 152
153#ifdef CONFIG_MMU
154/*
155 * Return the page attributes used for mapping dma_alloc_* memory, either in
156 * kernel space if remapping is needed, or to userspace through dma_mmap_*.
157 */
158pgprot_t dma_pgprot(struct device *dev, pgprot_t prot, unsigned long attrs)
159{
160 if (dev_is_dma_coherent(dev) ||
161 (IS_ENABLED(CONFIG_DMA_NONCOHERENT_CACHE_SYNC) &&
162 (attrs & DMA_ATTR_NON_CONSISTENT)))
163 return prot;
164 if (IS_ENABLED(CONFIG_ARCH_HAS_DMA_MMAP_PGPROT))
165 return arch_dma_mmap_pgprot(dev, prot, attrs);
166 return pgprot_noncached(prot);
167}
168#endif /* CONFIG_MMU */
169
148/* 170/*
149 * Create userspace mapping for the DMA-coherent memory. 171 * Create userspace mapping for the DMA-coherent memory.
150 */ 172 */
@@ -159,7 +181,7 @@ int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
159 unsigned long pfn; 181 unsigned long pfn;
160 int ret = -ENXIO; 182 int ret = -ENXIO;
161 183
162 vma->vm_page_prot = arch_dma_mmap_pgprot(dev, vma->vm_page_prot, attrs); 184 vma->vm_page_prot = dma_pgprot(dev, vma->vm_page_prot, attrs);
163 185
164 if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret)) 186 if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret))
165 return ret; 187 return ret;
@@ -170,7 +192,11 @@ int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
170 if (!dev_is_dma_coherent(dev)) { 192 if (!dev_is_dma_coherent(dev)) {
171 if (!IS_ENABLED(CONFIG_ARCH_HAS_DMA_COHERENT_TO_PFN)) 193 if (!IS_ENABLED(CONFIG_ARCH_HAS_DMA_COHERENT_TO_PFN))
172 return -ENXIO; 194 return -ENXIO;
195
196 /* If the PFN is not valid, we do not have a struct page */
173 pfn = arch_dma_coherent_to_pfn(dev, cpu_addr, dma_addr); 197 pfn = arch_dma_coherent_to_pfn(dev, cpu_addr, dma_addr);
198 if (!pfn_valid(pfn))
199 return -ENXIO;
174 } else { 200 } else {
175 pfn = page_to_pfn(virt_to_page(cpu_addr)); 201 pfn = page_to_pfn(virt_to_page(cpu_addr));
176 } 202 }
diff --git a/kernel/dma/remap.c b/kernel/dma/remap.c
index a594aec07882..ffe78f0b2fe4 100644
--- a/kernel/dma/remap.c
+++ b/kernel/dma/remap.c
@@ -218,7 +218,7 @@ void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
218 218
219 /* create a coherent mapping */ 219 /* create a coherent mapping */
220 ret = dma_common_contiguous_remap(page, size, VM_USERMAP, 220 ret = dma_common_contiguous_remap(page, size, VM_USERMAP,
221 arch_dma_mmap_pgprot(dev, PAGE_KERNEL, attrs), 221 dma_pgprot(dev, PAGE_KERNEL, attrs),
222 __builtin_return_address(0)); 222 __builtin_return_address(0));
223 if (!ret) { 223 if (!ret) {
224 __dma_direct_free_pages(dev, size, page); 224 __dma_direct_free_pages(dev, size, page);
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 026a14541a38..0463c1151bae 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -11274,7 +11274,7 @@ perf_event_create_kernel_counter(struct perf_event_attr *attr, int cpu,
11274 goto err_unlock; 11274 goto err_unlock;
11275 } 11275 }
11276 11276
11277 perf_install_in_context(ctx, event, cpu); 11277 perf_install_in_context(ctx, event, event->cpu);
11278 perf_unpin_context(ctx); 11278 perf_unpin_context(ctx);
11279 mutex_unlock(&ctx->mutex); 11279 mutex_unlock(&ctx->mutex);
11280 11280
diff --git a/kernel/exit.c b/kernel/exit.c
index a75b6a7f458a..5b4a5dcce8f8 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -720,6 +720,7 @@ static void exit_notify(struct task_struct *tsk, int group_dead)
720 if (group_dead) 720 if (group_dead)
721 kill_orphaned_pgrp(tsk->group_leader, NULL); 721 kill_orphaned_pgrp(tsk->group_leader, NULL);
722 722
723 tsk->exit_state = EXIT_ZOMBIE;
723 if (unlikely(tsk->ptrace)) { 724 if (unlikely(tsk->ptrace)) {
724 int sig = thread_group_leader(tsk) && 725 int sig = thread_group_leader(tsk) &&
725 thread_group_empty(tsk) && 726 thread_group_empty(tsk) &&
@@ -733,9 +734,10 @@ static void exit_notify(struct task_struct *tsk, int group_dead)
733 autoreap = true; 734 autoreap = true;
734 } 735 }
735 736
736 tsk->exit_state = autoreap ? EXIT_DEAD : EXIT_ZOMBIE; 737 if (autoreap) {
737 if (tsk->exit_state == EXIT_DEAD) 738 tsk->exit_state = EXIT_DEAD;
738 list_add(&tsk->ptrace_entry, &dead); 739 list_add(&tsk->ptrace_entry, &dead);
740 }
739 741
740 /* mt-exec, de_thread() is waiting for group leader */ 742 /* mt-exec, de_thread() is waiting for group leader */
741 if (unlikely(tsk->signal->notify_count < 0)) 743 if (unlikely(tsk->signal->notify_count < 0))
diff --git a/kernel/fork.c b/kernel/fork.c
index d8ae0f1b4148..2852d0e76ea3 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -726,7 +726,7 @@ void __put_task_struct(struct task_struct *tsk)
726 WARN_ON(tsk == current); 726 WARN_ON(tsk == current);
727 727
728 cgroup_free(tsk); 728 cgroup_free(tsk);
729 task_numa_free(tsk); 729 task_numa_free(tsk, true);
730 security_task_free(tsk); 730 security_task_free(tsk);
731 exit_creds(tsk); 731 exit_creds(tsk);
732 delayacct_tsk_free(tsk); 732 delayacct_tsk_free(tsk);
diff --git a/kernel/irq/affinity.c b/kernel/irq/affinity.c
index 4352b08ae48d..6fef48033f96 100644
--- a/kernel/irq/affinity.c
+++ b/kernel/irq/affinity.c
@@ -251,11 +251,9 @@ irq_create_affinity_masks(unsigned int nvecs, struct irq_affinity *affd)
251 * Determine the number of vectors which need interrupt affinities 251 * Determine the number of vectors which need interrupt affinities
252 * assigned. If the pre/post request exhausts the available vectors 252 * assigned. If the pre/post request exhausts the available vectors
253 * then nothing to do here except for invoking the calc_sets() 253 * then nothing to do here except for invoking the calc_sets()
254 * callback so the device driver can adjust to the situation. If there 254 * callback so the device driver can adjust to the situation.
255 * is only a single vector, then managing the queue is pointless as
256 * well.
257 */ 255 */
258 if (nvecs > 1 && nvecs > affd->pre_vectors + affd->post_vectors) 256 if (nvecs > affd->pre_vectors + affd->post_vectors)
259 affvecs = nvecs - affd->pre_vectors - affd->post_vectors; 257 affvecs = nvecs - affd->pre_vectors - affd->post_vectors;
260 else 258 else
261 affvecs = 0; 259 affvecs = 0;
diff --git a/kernel/irq/irqdesc.c b/kernel/irq/irqdesc.c
index 9484e88dabc2..9be995fc3c5a 100644
--- a/kernel/irq/irqdesc.c
+++ b/kernel/irq/irqdesc.c
@@ -295,6 +295,18 @@ static void irq_sysfs_add(int irq, struct irq_desc *desc)
295 } 295 }
296} 296}
297 297
298static void irq_sysfs_del(struct irq_desc *desc)
299{
300 /*
301 * If irq_sysfs_init() has not yet been invoked (early boot), then
302 * irq_kobj_base is NULL and the descriptor was never added.
303 * kobject_del() complains about a object with no parent, so make
304 * it conditional.
305 */
306 if (irq_kobj_base)
307 kobject_del(&desc->kobj);
308}
309
298static int __init irq_sysfs_init(void) 310static int __init irq_sysfs_init(void)
299{ 311{
300 struct irq_desc *desc; 312 struct irq_desc *desc;
@@ -325,6 +337,7 @@ static struct kobj_type irq_kobj_type = {
325}; 337};
326 338
327static void irq_sysfs_add(int irq, struct irq_desc *desc) {} 339static void irq_sysfs_add(int irq, struct irq_desc *desc) {}
340static void irq_sysfs_del(struct irq_desc *desc) {}
328 341
329#endif /* CONFIG_SYSFS */ 342#endif /* CONFIG_SYSFS */
330 343
@@ -438,7 +451,7 @@ static void free_desc(unsigned int irq)
438 * The sysfs entry must be serialized against a concurrent 451 * The sysfs entry must be serialized against a concurrent
439 * irq_sysfs_init() as well. 452 * irq_sysfs_init() as well.
440 */ 453 */
441 kobject_del(&desc->kobj); 454 irq_sysfs_del(desc);
442 delete_irq_desc(irq); 455 delete_irq_desc(irq);
443 456
444 /* 457 /*
diff --git a/kernel/kallsyms.c b/kernel/kallsyms.c
index 95a260f9214b..136ce049c4ad 100644
--- a/kernel/kallsyms.c
+++ b/kernel/kallsyms.c
@@ -263,8 +263,10 @@ int kallsyms_lookup_size_offset(unsigned long addr, unsigned long *symbolsize,
263{ 263{
264 char namebuf[KSYM_NAME_LEN]; 264 char namebuf[KSYM_NAME_LEN];
265 265
266 if (is_ksym_addr(addr)) 266 if (is_ksym_addr(addr)) {
267 return !!get_symbol_pos(addr, symbolsize, offset); 267 get_symbol_pos(addr, symbolsize, offset);
268 return 1;
269 }
268 return !!module_address_lookup(addr, symbolsize, offset, NULL, namebuf) || 270 return !!module_address_lookup(addr, symbolsize, offset, NULL, namebuf) ||
269 !!__bpf_address_lookup(addr, symbolsize, offset, namebuf); 271 !!__bpf_address_lookup(addr, symbolsize, offset, namebuf);
270} 272}
diff --git a/kernel/kprobes.c b/kernel/kprobes.c
index 9873fc627d61..d9770a5393c8 100644
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -470,6 +470,7 @@ static DECLARE_DELAYED_WORK(optimizing_work, kprobe_optimizer);
470 */ 470 */
471static void do_optimize_kprobes(void) 471static void do_optimize_kprobes(void)
472{ 472{
473 lockdep_assert_held(&text_mutex);
473 /* 474 /*
474 * The optimization/unoptimization refers online_cpus via 475 * The optimization/unoptimization refers online_cpus via
475 * stop_machine() and cpu-hotplug modifies online_cpus. 476 * stop_machine() and cpu-hotplug modifies online_cpus.
@@ -487,9 +488,7 @@ static void do_optimize_kprobes(void)
487 list_empty(&optimizing_list)) 488 list_empty(&optimizing_list))
488 return; 489 return;
489 490
490 mutex_lock(&text_mutex);
491 arch_optimize_kprobes(&optimizing_list); 491 arch_optimize_kprobes(&optimizing_list);
492 mutex_unlock(&text_mutex);
493} 492}
494 493
495/* 494/*
@@ -500,6 +499,7 @@ static void do_unoptimize_kprobes(void)
500{ 499{
501 struct optimized_kprobe *op, *tmp; 500 struct optimized_kprobe *op, *tmp;
502 501
502 lockdep_assert_held(&text_mutex);
503 /* See comment in do_optimize_kprobes() */ 503 /* See comment in do_optimize_kprobes() */
504 lockdep_assert_cpus_held(); 504 lockdep_assert_cpus_held();
505 505
@@ -507,7 +507,6 @@ static void do_unoptimize_kprobes(void)
507 if (list_empty(&unoptimizing_list)) 507 if (list_empty(&unoptimizing_list))
508 return; 508 return;
509 509
510 mutex_lock(&text_mutex);
511 arch_unoptimize_kprobes(&unoptimizing_list, &freeing_list); 510 arch_unoptimize_kprobes(&unoptimizing_list, &freeing_list);
512 /* Loop free_list for disarming */ 511 /* Loop free_list for disarming */
513 list_for_each_entry_safe(op, tmp, &freeing_list, list) { 512 list_for_each_entry_safe(op, tmp, &freeing_list, list) {
@@ -524,7 +523,6 @@ static void do_unoptimize_kprobes(void)
524 } else 523 } else
525 list_del_init(&op->list); 524 list_del_init(&op->list);
526 } 525 }
527 mutex_unlock(&text_mutex);
528} 526}
529 527
530/* Reclaim all kprobes on the free_list */ 528/* Reclaim all kprobes on the free_list */
@@ -556,6 +554,7 @@ static void kprobe_optimizer(struct work_struct *work)
556{ 554{
557 mutex_lock(&kprobe_mutex); 555 mutex_lock(&kprobe_mutex);
558 cpus_read_lock(); 556 cpus_read_lock();
557 mutex_lock(&text_mutex);
559 /* Lock modules while optimizing kprobes */ 558 /* Lock modules while optimizing kprobes */
560 mutex_lock(&module_mutex); 559 mutex_lock(&module_mutex);
561 560
@@ -583,6 +582,7 @@ static void kprobe_optimizer(struct work_struct *work)
583 do_free_cleaned_kprobes(); 582 do_free_cleaned_kprobes();
584 583
585 mutex_unlock(&module_mutex); 584 mutex_unlock(&module_mutex);
585 mutex_unlock(&text_mutex);
586 cpus_read_unlock(); 586 cpus_read_unlock();
587 mutex_unlock(&kprobe_mutex); 587 mutex_unlock(&kprobe_mutex);
588 588
diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index 341f52117f88..4861cf8e274b 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -448,7 +448,7 @@ static void print_lockdep_off(const char *bug_msg)
448 448
449unsigned long nr_stack_trace_entries; 449unsigned long nr_stack_trace_entries;
450 450
451#if defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_PROVE_LOCKING) 451#ifdef CONFIG_PROVE_LOCKING
452/* 452/*
453 * Stack-trace: tightly packed array of stack backtrace 453 * Stack-trace: tightly packed array of stack backtrace
454 * addresses. Protected by the graph_lock. 454 * addresses. Protected by the graph_lock.
@@ -491,7 +491,7 @@ unsigned int max_lockdep_depth;
491DEFINE_PER_CPU(struct lockdep_stats, lockdep_stats); 491DEFINE_PER_CPU(struct lockdep_stats, lockdep_stats);
492#endif 492#endif
493 493
494#if defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_PROVE_LOCKING) 494#ifdef CONFIG_PROVE_LOCKING
495/* 495/*
496 * Locking printouts: 496 * Locking printouts:
497 */ 497 */
@@ -2969,7 +2969,7 @@ static void check_chain_key(struct task_struct *curr)
2969#endif 2969#endif
2970} 2970}
2971 2971
2972#if defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_PROVE_LOCKING) 2972#ifdef CONFIG_PROVE_LOCKING
2973static int mark_lock(struct task_struct *curr, struct held_lock *this, 2973static int mark_lock(struct task_struct *curr, struct held_lock *this,
2974 enum lock_usage_bit new_bit); 2974 enum lock_usage_bit new_bit);
2975 2975
@@ -3608,7 +3608,7 @@ static int mark_lock(struct task_struct *curr, struct held_lock *this,
3608 return ret; 3608 return ret;
3609} 3609}
3610 3610
3611#else /* defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_PROVE_LOCKING) */ 3611#else /* CONFIG_PROVE_LOCKING */
3612 3612
3613static inline int 3613static inline int
3614mark_usage(struct task_struct *curr, struct held_lock *hlock, int check) 3614mark_usage(struct task_struct *curr, struct held_lock *hlock, int check)
@@ -3627,7 +3627,7 @@ static inline int separate_irq_context(struct task_struct *curr,
3627 return 0; 3627 return 0;
3628} 3628}
3629 3629
3630#endif /* defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_PROVE_LOCKING) */ 3630#endif /* CONFIG_PROVE_LOCKING */
3631 3631
3632/* 3632/*
3633 * Initialize a lock instance's lock-class mapping info: 3633 * Initialize a lock instance's lock-class mapping info:
@@ -4321,8 +4321,7 @@ static void __lock_unpin_lock(struct lockdep_map *lock, struct pin_cookie cookie
4321 */ 4321 */
4322static void check_flags(unsigned long flags) 4322static void check_flags(unsigned long flags)
4323{ 4323{
4324#if defined(CONFIG_PROVE_LOCKING) && defined(CONFIG_DEBUG_LOCKDEP) && \ 4324#if defined(CONFIG_PROVE_LOCKING) && defined(CONFIG_DEBUG_LOCKDEP)
4325 defined(CONFIG_TRACE_IRQFLAGS)
4326 if (!debug_locks) 4325 if (!debug_locks)
4327 return; 4326 return;
4328 4327
diff --git a/kernel/locking/lockdep_proc.c b/kernel/locking/lockdep_proc.c
index 65b6a1600c8f..bda006f8a88b 100644
--- a/kernel/locking/lockdep_proc.c
+++ b/kernel/locking/lockdep_proc.c
@@ -200,7 +200,6 @@ static void lockdep_stats_debug_show(struct seq_file *m)
200 200
201static int lockdep_stats_show(struct seq_file *m, void *v) 201static int lockdep_stats_show(struct seq_file *m, void *v)
202{ 202{
203 struct lock_class *class;
204 unsigned long nr_unused = 0, nr_uncategorized = 0, 203 unsigned long nr_unused = 0, nr_uncategorized = 0,
205 nr_irq_safe = 0, nr_irq_unsafe = 0, 204 nr_irq_safe = 0, nr_irq_unsafe = 0,
206 nr_softirq_safe = 0, nr_softirq_unsafe = 0, 205 nr_softirq_safe = 0, nr_softirq_unsafe = 0,
@@ -211,6 +210,8 @@ static int lockdep_stats_show(struct seq_file *m, void *v)
211 sum_forward_deps = 0; 210 sum_forward_deps = 0;
212 211
213#ifdef CONFIG_PROVE_LOCKING 212#ifdef CONFIG_PROVE_LOCKING
213 struct lock_class *class;
214
214 list_for_each_entry(class, &all_lock_classes, lock_entry) { 215 list_for_each_entry(class, &all_lock_classes, lock_entry) {
215 216
216 if (class->usage_mask == 0) 217 if (class->usage_mask == 0)
diff --git a/kernel/locking/mutex.c b/kernel/locking/mutex.c
index edd1c082dbf5..5e069734363c 100644
--- a/kernel/locking/mutex.c
+++ b/kernel/locking/mutex.c
@@ -908,6 +908,10 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
908 908
909 might_sleep(); 909 might_sleep();
910 910
911#ifdef CONFIG_DEBUG_MUTEXES
912 DEBUG_LOCKS_WARN_ON(lock->magic != lock);
913#endif
914
911 ww = container_of(lock, struct ww_mutex, base); 915 ww = container_of(lock, struct ww_mutex, base);
912 if (use_ww_ctx && ww_ctx) { 916 if (use_ww_ctx && ww_ctx) {
913 if (unlikely(ww_ctx == READ_ONCE(ww->ctx))) 917 if (unlikely(ww_ctx == READ_ONCE(ww->ctx)))
@@ -1379,8 +1383,13 @@ __ww_mutex_lock_interruptible_slowpath(struct ww_mutex *lock,
1379 */ 1383 */
1380int __sched mutex_trylock(struct mutex *lock) 1384int __sched mutex_trylock(struct mutex *lock)
1381{ 1385{
1382 bool locked = __mutex_trylock(lock); 1386 bool locked;
1387
1388#ifdef CONFIG_DEBUG_MUTEXES
1389 DEBUG_LOCKS_WARN_ON(lock->magic != lock);
1390#endif
1383 1391
1392 locked = __mutex_trylock(lock);
1384 if (locked) 1393 if (locked)
1385 mutex_acquire(&lock->dep_map, 0, 1, _RET_IP_); 1394 mutex_acquire(&lock->dep_map, 0, 1, _RET_IP_);
1386 1395
diff --git a/kernel/locking/rwsem.c b/kernel/locking/rwsem.c
index 37524a47f002..bd0f0d05724c 100644
--- a/kernel/locking/rwsem.c
+++ b/kernel/locking/rwsem.c
@@ -666,7 +666,11 @@ static inline bool rwsem_can_spin_on_owner(struct rw_semaphore *sem,
666 preempt_disable(); 666 preempt_disable();
667 rcu_read_lock(); 667 rcu_read_lock();
668 owner = rwsem_owner_flags(sem, &flags); 668 owner = rwsem_owner_flags(sem, &flags);
669 if ((flags & nonspinnable) || (owner && !owner_on_cpu(owner))) 669 /*
670 * Don't check the read-owner as the entry may be stale.
671 */
672 if ((flags & nonspinnable) ||
673 (owner && !(flags & RWSEM_READER_OWNED) && !owner_on_cpu(owner)))
670 ret = false; 674 ret = false;
671 rcu_read_unlock(); 675 rcu_read_unlock();
672 preempt_enable(); 676 preempt_enable();
@@ -1000,6 +1004,7 @@ rwsem_down_read_slowpath(struct rw_semaphore *sem, int state)
1000 atomic_long_add(-RWSEM_READER_BIAS, &sem->count); 1004 atomic_long_add(-RWSEM_READER_BIAS, &sem->count);
1001 adjustment = 0; 1005 adjustment = 0;
1002 if (rwsem_optimistic_spin(sem, false)) { 1006 if (rwsem_optimistic_spin(sem, false)) {
1007 /* rwsem_optimistic_spin() implies ACQUIRE on success */
1003 /* 1008 /*
1004 * Wake up other readers in the wait list if the front 1009 * Wake up other readers in the wait list if the front
1005 * waiter is a reader. 1010 * waiter is a reader.
@@ -1014,6 +1019,7 @@ rwsem_down_read_slowpath(struct rw_semaphore *sem, int state)
1014 } 1019 }
1015 return sem; 1020 return sem;
1016 } else if (rwsem_reader_phase_trylock(sem, waiter.last_rowner)) { 1021 } else if (rwsem_reader_phase_trylock(sem, waiter.last_rowner)) {
1022 /* rwsem_reader_phase_trylock() implies ACQUIRE on success */
1017 return sem; 1023 return sem;
1018 } 1024 }
1019 1025
@@ -1032,6 +1038,8 @@ queue:
1032 */ 1038 */
1033 if (adjustment && !(atomic_long_read(&sem->count) & 1039 if (adjustment && !(atomic_long_read(&sem->count) &
1034 (RWSEM_WRITER_MASK | RWSEM_FLAG_HANDOFF))) { 1040 (RWSEM_WRITER_MASK | RWSEM_FLAG_HANDOFF))) {
1041 /* Provide lock ACQUIRE */
1042 smp_acquire__after_ctrl_dep();
1035 raw_spin_unlock_irq(&sem->wait_lock); 1043 raw_spin_unlock_irq(&sem->wait_lock);
1036 rwsem_set_reader_owned(sem); 1044 rwsem_set_reader_owned(sem);
1037 lockevent_inc(rwsem_rlock_fast); 1045 lockevent_inc(rwsem_rlock_fast);
@@ -1065,15 +1073,18 @@ queue:
1065 wake_up_q(&wake_q); 1073 wake_up_q(&wake_q);
1066 1074
1067 /* wait to be given the lock */ 1075 /* wait to be given the lock */
1068 while (true) { 1076 for (;;) {
1069 set_current_state(state); 1077 set_current_state(state);
1070 if (!waiter.task) 1078 if (!smp_load_acquire(&waiter.task)) {
1079 /* Matches rwsem_mark_wake()'s smp_store_release(). */
1071 break; 1080 break;
1081 }
1072 if (signal_pending_state(state, current)) { 1082 if (signal_pending_state(state, current)) {
1073 raw_spin_lock_irq(&sem->wait_lock); 1083 raw_spin_lock_irq(&sem->wait_lock);
1074 if (waiter.task) 1084 if (waiter.task)
1075 goto out_nolock; 1085 goto out_nolock;
1076 raw_spin_unlock_irq(&sem->wait_lock); 1086 raw_spin_unlock_irq(&sem->wait_lock);
1087 /* Ordered by sem->wait_lock against rwsem_mark_wake(). */
1077 break; 1088 break;
1078 } 1089 }
1079 schedule(); 1090 schedule();
@@ -1083,6 +1094,7 @@ queue:
1083 __set_current_state(TASK_RUNNING); 1094 __set_current_state(TASK_RUNNING);
1084 lockevent_inc(rwsem_rlock); 1095 lockevent_inc(rwsem_rlock);
1085 return sem; 1096 return sem;
1097
1086out_nolock: 1098out_nolock:
1087 list_del(&waiter.list); 1099 list_del(&waiter.list);
1088 if (list_empty(&sem->wait_list)) { 1100 if (list_empty(&sem->wait_list)) {
@@ -1123,8 +1135,10 @@ rwsem_down_write_slowpath(struct rw_semaphore *sem, int state)
1123 1135
1124 /* do optimistic spinning and steal lock if possible */ 1136 /* do optimistic spinning and steal lock if possible */
1125 if (rwsem_can_spin_on_owner(sem, RWSEM_WR_NONSPINNABLE) && 1137 if (rwsem_can_spin_on_owner(sem, RWSEM_WR_NONSPINNABLE) &&
1126 rwsem_optimistic_spin(sem, true)) 1138 rwsem_optimistic_spin(sem, true)) {
1139 /* rwsem_optimistic_spin() implies ACQUIRE on success */
1127 return sem; 1140 return sem;
1141 }
1128 1142
1129 /* 1143 /*
1130 * Disable reader optimistic spinning for this rwsem after 1144 * Disable reader optimistic spinning for this rwsem after
@@ -1184,9 +1198,11 @@ rwsem_down_write_slowpath(struct rw_semaphore *sem, int state)
1184wait: 1198wait:
1185 /* wait until we successfully acquire the lock */ 1199 /* wait until we successfully acquire the lock */
1186 set_current_state(state); 1200 set_current_state(state);
1187 while (true) { 1201 for (;;) {
1188 if (rwsem_try_write_lock(sem, wstate)) 1202 if (rwsem_try_write_lock(sem, wstate)) {
1203 /* rwsem_try_write_lock() implies ACQUIRE on success */
1189 break; 1204 break;
1205 }
1190 1206
1191 raw_spin_unlock_irq(&sem->wait_lock); 1207 raw_spin_unlock_irq(&sem->wait_lock);
1192 1208
diff --git a/kernel/module.c b/kernel/module.c
index 5933395af9a0..9ee93421269c 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -65,9 +65,9 @@
65/* 65/*
66 * Modules' sections will be aligned on page boundaries 66 * Modules' sections will be aligned on page boundaries
67 * to ensure complete separation of code and data, but 67 * to ensure complete separation of code and data, but
68 * only when CONFIG_STRICT_MODULE_RWX=y 68 * only when CONFIG_ARCH_HAS_STRICT_MODULE_RWX=y
69 */ 69 */
70#ifdef CONFIG_STRICT_MODULE_RWX 70#ifdef CONFIG_ARCH_HAS_STRICT_MODULE_RWX
71# define debug_align(X) ALIGN(X, PAGE_SIZE) 71# define debug_align(X) ALIGN(X, PAGE_SIZE)
72#else 72#else
73# define debug_align(X) (X) 73# define debug_align(X) (X)
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 2b037f195473..010d578118d6 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -3904,7 +3904,7 @@ void __noreturn do_task_dead(void)
3904 3904
3905static inline void sched_submit_work(struct task_struct *tsk) 3905static inline void sched_submit_work(struct task_struct *tsk)
3906{ 3906{
3907 if (!tsk->state || tsk_is_pi_blocked(tsk)) 3907 if (!tsk->state)
3908 return; 3908 return;
3909 3909
3910 /* 3910 /*
@@ -3920,6 +3920,9 @@ static inline void sched_submit_work(struct task_struct *tsk)
3920 preempt_enable_no_resched(); 3920 preempt_enable_no_resched();
3921 } 3921 }
3922 3922
3923 if (tsk_is_pi_blocked(tsk))
3924 return;
3925
3923 /* 3926 /*
3924 * If we are going to sleep and we have plugged IO queued, 3927 * If we are going to sleep and we have plugged IO queued,
3925 * make sure to submit it to avoid deadlocks. 3928 * make sure to submit it to avoid deadlocks.
diff --git a/kernel/sched/cpufreq_schedutil.c b/kernel/sched/cpufreq_schedutil.c
index 636ca6f88c8e..867b4bb6d4be 100644
--- a/kernel/sched/cpufreq_schedutil.c
+++ b/kernel/sched/cpufreq_schedutil.c
@@ -40,6 +40,7 @@ struct sugov_policy {
40 struct task_struct *thread; 40 struct task_struct *thread;
41 bool work_in_progress; 41 bool work_in_progress;
42 42
43 bool limits_changed;
43 bool need_freq_update; 44 bool need_freq_update;
44}; 45};
45 46
@@ -89,8 +90,11 @@ static bool sugov_should_update_freq(struct sugov_policy *sg_policy, u64 time)
89 !cpufreq_this_cpu_can_update(sg_policy->policy)) 90 !cpufreq_this_cpu_can_update(sg_policy->policy))
90 return false; 91 return false;
91 92
92 if (unlikely(sg_policy->need_freq_update)) 93 if (unlikely(sg_policy->limits_changed)) {
94 sg_policy->limits_changed = false;
95 sg_policy->need_freq_update = true;
93 return true; 96 return true;
97 }
94 98
95 delta_ns = time - sg_policy->last_freq_update_time; 99 delta_ns = time - sg_policy->last_freq_update_time;
96 100
@@ -437,7 +441,7 @@ static inline bool sugov_cpu_is_busy(struct sugov_cpu *sg_cpu) { return false; }
437static inline void ignore_dl_rate_limit(struct sugov_cpu *sg_cpu, struct sugov_policy *sg_policy) 441static inline void ignore_dl_rate_limit(struct sugov_cpu *sg_cpu, struct sugov_policy *sg_policy)
438{ 442{
439 if (cpu_bw_dl(cpu_rq(sg_cpu->cpu)) > sg_cpu->bw_dl) 443 if (cpu_bw_dl(cpu_rq(sg_cpu->cpu)) > sg_cpu->bw_dl)
440 sg_policy->need_freq_update = true; 444 sg_policy->limits_changed = true;
441} 445}
442 446
443static void sugov_update_single(struct update_util_data *hook, u64 time, 447static void sugov_update_single(struct update_util_data *hook, u64 time,
@@ -457,7 +461,8 @@ static void sugov_update_single(struct update_util_data *hook, u64 time,
457 if (!sugov_should_update_freq(sg_policy, time)) 461 if (!sugov_should_update_freq(sg_policy, time))
458 return; 462 return;
459 463
460 busy = sugov_cpu_is_busy(sg_cpu); 464 /* Limits may have changed, don't skip frequency update */
465 busy = !sg_policy->need_freq_update && sugov_cpu_is_busy(sg_cpu);
461 466
462 util = sugov_get_util(sg_cpu); 467 util = sugov_get_util(sg_cpu);
463 max = sg_cpu->max; 468 max = sg_cpu->max;
@@ -831,6 +836,7 @@ static int sugov_start(struct cpufreq_policy *policy)
831 sg_policy->last_freq_update_time = 0; 836 sg_policy->last_freq_update_time = 0;
832 sg_policy->next_freq = 0; 837 sg_policy->next_freq = 0;
833 sg_policy->work_in_progress = false; 838 sg_policy->work_in_progress = false;
839 sg_policy->limits_changed = false;
834 sg_policy->need_freq_update = false; 840 sg_policy->need_freq_update = false;
835 sg_policy->cached_raw_freq = 0; 841 sg_policy->cached_raw_freq = 0;
836 842
@@ -879,7 +885,7 @@ static void sugov_limits(struct cpufreq_policy *policy)
879 mutex_unlock(&sg_policy->work_lock); 885 mutex_unlock(&sg_policy->work_lock);
880 } 886 }
881 887
882 sg_policy->need_freq_update = true; 888 sg_policy->limits_changed = true;
883} 889}
884 890
885struct cpufreq_governor schedutil_gov = { 891struct cpufreq_governor schedutil_gov = {
diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c
index ef5b9f6b1d42..46122edd8552 100644
--- a/kernel/sched/deadline.c
+++ b/kernel/sched/deadline.c
@@ -2088,17 +2088,13 @@ retry:
2088 } 2088 }
2089 2089
2090 deactivate_task(rq, next_task, 0); 2090 deactivate_task(rq, next_task, 0);
2091 sub_running_bw(&next_task->dl, &rq->dl);
2092 sub_rq_bw(&next_task->dl, &rq->dl);
2093 set_task_cpu(next_task, later_rq->cpu); 2091 set_task_cpu(next_task, later_rq->cpu);
2094 add_rq_bw(&next_task->dl, &later_rq->dl);
2095 2092
2096 /* 2093 /*
2097 * Update the later_rq clock here, because the clock is used 2094 * Update the later_rq clock here, because the clock is used
2098 * by the cpufreq_update_util() inside __add_running_bw(). 2095 * by the cpufreq_update_util() inside __add_running_bw().
2099 */ 2096 */
2100 update_rq_clock(later_rq); 2097 update_rq_clock(later_rq);
2101 add_running_bw(&next_task->dl, &later_rq->dl);
2102 activate_task(later_rq, next_task, ENQUEUE_NOCLOCK); 2098 activate_task(later_rq, next_task, ENQUEUE_NOCLOCK);
2103 ret = 1; 2099 ret = 1;
2104 2100
@@ -2186,11 +2182,7 @@ static void pull_dl_task(struct rq *this_rq)
2186 resched = true; 2182 resched = true;
2187 2183
2188 deactivate_task(src_rq, p, 0); 2184 deactivate_task(src_rq, p, 0);
2189 sub_running_bw(&p->dl, &src_rq->dl);
2190 sub_rq_bw(&p->dl, &src_rq->dl);
2191 set_task_cpu(p, this_cpu); 2185 set_task_cpu(p, this_cpu);
2192 add_rq_bw(&p->dl, &this_rq->dl);
2193 add_running_bw(&p->dl, &this_rq->dl);
2194 activate_task(this_rq, p, 0); 2186 activate_task(this_rq, p, 0);
2195 dmin = p->dl.deadline; 2187 dmin = p->dl.deadline;
2196 2188
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 036be95a87e9..bc9cfeaac8bd 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -1086,6 +1086,21 @@ struct numa_group {
1086 unsigned long faults[0]; 1086 unsigned long faults[0];
1087}; 1087};
1088 1088
1089/*
1090 * For functions that can be called in multiple contexts that permit reading
1091 * ->numa_group (see struct task_struct for locking rules).
1092 */
1093static struct numa_group *deref_task_numa_group(struct task_struct *p)
1094{
1095 return rcu_dereference_check(p->numa_group, p == current ||
1096 (lockdep_is_held(&task_rq(p)->lock) && !READ_ONCE(p->on_cpu)));
1097}
1098
1099static struct numa_group *deref_curr_numa_group(struct task_struct *p)
1100{
1101 return rcu_dereference_protected(p->numa_group, p == current);
1102}
1103
1089static inline unsigned long group_faults_priv(struct numa_group *ng); 1104static inline unsigned long group_faults_priv(struct numa_group *ng);
1090static inline unsigned long group_faults_shared(struct numa_group *ng); 1105static inline unsigned long group_faults_shared(struct numa_group *ng);
1091 1106
@@ -1129,10 +1144,12 @@ static unsigned int task_scan_start(struct task_struct *p)
1129{ 1144{
1130 unsigned long smin = task_scan_min(p); 1145 unsigned long smin = task_scan_min(p);
1131 unsigned long period = smin; 1146 unsigned long period = smin;
1147 struct numa_group *ng;
1132 1148
1133 /* Scale the maximum scan period with the amount of shared memory. */ 1149 /* Scale the maximum scan period with the amount of shared memory. */
1134 if (p->numa_group) { 1150 rcu_read_lock();
1135 struct numa_group *ng = p->numa_group; 1151 ng = rcu_dereference(p->numa_group);
1152 if (ng) {
1136 unsigned long shared = group_faults_shared(ng); 1153 unsigned long shared = group_faults_shared(ng);
1137 unsigned long private = group_faults_priv(ng); 1154 unsigned long private = group_faults_priv(ng);
1138 1155
@@ -1140,6 +1157,7 @@ static unsigned int task_scan_start(struct task_struct *p)
1140 period *= shared + 1; 1157 period *= shared + 1;
1141 period /= private + shared + 1; 1158 period /= private + shared + 1;
1142 } 1159 }
1160 rcu_read_unlock();
1143 1161
1144 return max(smin, period); 1162 return max(smin, period);
1145} 1163}
@@ -1148,13 +1166,14 @@ static unsigned int task_scan_max(struct task_struct *p)
1148{ 1166{
1149 unsigned long smin = task_scan_min(p); 1167 unsigned long smin = task_scan_min(p);
1150 unsigned long smax; 1168 unsigned long smax;
1169 struct numa_group *ng;
1151 1170
1152 /* Watch for min being lower than max due to floor calculations */ 1171 /* Watch for min being lower than max due to floor calculations */
1153 smax = sysctl_numa_balancing_scan_period_max / task_nr_scan_windows(p); 1172 smax = sysctl_numa_balancing_scan_period_max / task_nr_scan_windows(p);
1154 1173
1155 /* Scale the maximum scan period with the amount of shared memory. */ 1174 /* Scale the maximum scan period with the amount of shared memory. */
1156 if (p->numa_group) { 1175 ng = deref_curr_numa_group(p);
1157 struct numa_group *ng = p->numa_group; 1176 if (ng) {
1158 unsigned long shared = group_faults_shared(ng); 1177 unsigned long shared = group_faults_shared(ng);
1159 unsigned long private = group_faults_priv(ng); 1178 unsigned long private = group_faults_priv(ng);
1160 unsigned long period = smax; 1179 unsigned long period = smax;
@@ -1186,7 +1205,7 @@ void init_numa_balancing(unsigned long clone_flags, struct task_struct *p)
1186 p->numa_scan_period = sysctl_numa_balancing_scan_delay; 1205 p->numa_scan_period = sysctl_numa_balancing_scan_delay;
1187 p->numa_work.next = &p->numa_work; 1206 p->numa_work.next = &p->numa_work;
1188 p->numa_faults = NULL; 1207 p->numa_faults = NULL;
1189 p->numa_group = NULL; 1208 RCU_INIT_POINTER(p->numa_group, NULL);
1190 p->last_task_numa_placement = 0; 1209 p->last_task_numa_placement = 0;
1191 p->last_sum_exec_runtime = 0; 1210 p->last_sum_exec_runtime = 0;
1192 1211
@@ -1233,7 +1252,16 @@ static void account_numa_dequeue(struct rq *rq, struct task_struct *p)
1233 1252
1234pid_t task_numa_group_id(struct task_struct *p) 1253pid_t task_numa_group_id(struct task_struct *p)
1235{ 1254{
1236 return p->numa_group ? p->numa_group->gid : 0; 1255 struct numa_group *ng;
1256 pid_t gid = 0;
1257
1258 rcu_read_lock();
1259 ng = rcu_dereference(p->numa_group);
1260 if (ng)
1261 gid = ng->gid;
1262 rcu_read_unlock();
1263
1264 return gid;
1237} 1265}
1238 1266
1239/* 1267/*
@@ -1258,11 +1286,13 @@ static inline unsigned long task_faults(struct task_struct *p, int nid)
1258 1286
1259static inline unsigned long group_faults(struct task_struct *p, int nid) 1287static inline unsigned long group_faults(struct task_struct *p, int nid)
1260{ 1288{
1261 if (!p->numa_group) 1289 struct numa_group *ng = deref_task_numa_group(p);
1290
1291 if (!ng)
1262 return 0; 1292 return 0;
1263 1293
1264 return p->numa_group->faults[task_faults_idx(NUMA_MEM, nid, 0)] + 1294 return ng->faults[task_faults_idx(NUMA_MEM, nid, 0)] +
1265 p->numa_group->faults[task_faults_idx(NUMA_MEM, nid, 1)]; 1295 ng->faults[task_faults_idx(NUMA_MEM, nid, 1)];
1266} 1296}
1267 1297
1268static inline unsigned long group_faults_cpu(struct numa_group *group, int nid) 1298static inline unsigned long group_faults_cpu(struct numa_group *group, int nid)
@@ -1400,12 +1430,13 @@ static inline unsigned long task_weight(struct task_struct *p, int nid,
1400static inline unsigned long group_weight(struct task_struct *p, int nid, 1430static inline unsigned long group_weight(struct task_struct *p, int nid,
1401 int dist) 1431 int dist)
1402{ 1432{
1433 struct numa_group *ng = deref_task_numa_group(p);
1403 unsigned long faults, total_faults; 1434 unsigned long faults, total_faults;
1404 1435
1405 if (!p->numa_group) 1436 if (!ng)
1406 return 0; 1437 return 0;
1407 1438
1408 total_faults = p->numa_group->total_faults; 1439 total_faults = ng->total_faults;
1409 1440
1410 if (!total_faults) 1441 if (!total_faults)
1411 return 0; 1442 return 0;
@@ -1419,7 +1450,7 @@ static inline unsigned long group_weight(struct task_struct *p, int nid,
1419bool should_numa_migrate_memory(struct task_struct *p, struct page * page, 1450bool should_numa_migrate_memory(struct task_struct *p, struct page * page,
1420 int src_nid, int dst_cpu) 1451 int src_nid, int dst_cpu)
1421{ 1452{
1422 struct numa_group *ng = p->numa_group; 1453 struct numa_group *ng = deref_curr_numa_group(p);
1423 int dst_nid = cpu_to_node(dst_cpu); 1454 int dst_nid = cpu_to_node(dst_cpu);
1424 int last_cpupid, this_cpupid; 1455 int last_cpupid, this_cpupid;
1425 1456
@@ -1600,13 +1631,14 @@ static bool load_too_imbalanced(long src_load, long dst_load,
1600static void task_numa_compare(struct task_numa_env *env, 1631static void task_numa_compare(struct task_numa_env *env,
1601 long taskimp, long groupimp, bool maymove) 1632 long taskimp, long groupimp, bool maymove)
1602{ 1633{
1634 struct numa_group *cur_ng, *p_ng = deref_curr_numa_group(env->p);
1603 struct rq *dst_rq = cpu_rq(env->dst_cpu); 1635 struct rq *dst_rq = cpu_rq(env->dst_cpu);
1636 long imp = p_ng ? groupimp : taskimp;
1604 struct task_struct *cur; 1637 struct task_struct *cur;
1605 long src_load, dst_load; 1638 long src_load, dst_load;
1606 long load;
1607 long imp = env->p->numa_group ? groupimp : taskimp;
1608 long moveimp = imp;
1609 int dist = env->dist; 1639 int dist = env->dist;
1640 long moveimp = imp;
1641 long load;
1610 1642
1611 if (READ_ONCE(dst_rq->numa_migrate_on)) 1643 if (READ_ONCE(dst_rq->numa_migrate_on))
1612 return; 1644 return;
@@ -1645,21 +1677,22 @@ static void task_numa_compare(struct task_numa_env *env,
1645 * If dst and source tasks are in the same NUMA group, or not 1677 * If dst and source tasks are in the same NUMA group, or not
1646 * in any group then look only at task weights. 1678 * in any group then look only at task weights.
1647 */ 1679 */
1648 if (cur->numa_group == env->p->numa_group) { 1680 cur_ng = rcu_dereference(cur->numa_group);
1681 if (cur_ng == p_ng) {
1649 imp = taskimp + task_weight(cur, env->src_nid, dist) - 1682 imp = taskimp + task_weight(cur, env->src_nid, dist) -
1650 task_weight(cur, env->dst_nid, dist); 1683 task_weight(cur, env->dst_nid, dist);
1651 /* 1684 /*
1652 * Add some hysteresis to prevent swapping the 1685 * Add some hysteresis to prevent swapping the
1653 * tasks within a group over tiny differences. 1686 * tasks within a group over tiny differences.
1654 */ 1687 */
1655 if (cur->numa_group) 1688 if (cur_ng)
1656 imp -= imp / 16; 1689 imp -= imp / 16;
1657 } else { 1690 } else {
1658 /* 1691 /*
1659 * Compare the group weights. If a task is all by itself 1692 * Compare the group weights. If a task is all by itself
1660 * (not part of a group), use the task weight instead. 1693 * (not part of a group), use the task weight instead.
1661 */ 1694 */
1662 if (cur->numa_group && env->p->numa_group) 1695 if (cur_ng && p_ng)
1663 imp += group_weight(cur, env->src_nid, dist) - 1696 imp += group_weight(cur, env->src_nid, dist) -
1664 group_weight(cur, env->dst_nid, dist); 1697 group_weight(cur, env->dst_nid, dist);
1665 else 1698 else
@@ -1757,11 +1790,12 @@ static int task_numa_migrate(struct task_struct *p)
1757 .best_imp = 0, 1790 .best_imp = 0,
1758 .best_cpu = -1, 1791 .best_cpu = -1,
1759 }; 1792 };
1793 unsigned long taskweight, groupweight;
1760 struct sched_domain *sd; 1794 struct sched_domain *sd;
1795 long taskimp, groupimp;
1796 struct numa_group *ng;
1761 struct rq *best_rq; 1797 struct rq *best_rq;
1762 unsigned long taskweight, groupweight;
1763 int nid, ret, dist; 1798 int nid, ret, dist;
1764 long taskimp, groupimp;
1765 1799
1766 /* 1800 /*
1767 * Pick the lowest SD_NUMA domain, as that would have the smallest 1801 * Pick the lowest SD_NUMA domain, as that would have the smallest
@@ -1807,7 +1841,8 @@ static int task_numa_migrate(struct task_struct *p)
1807 * multiple NUMA nodes; in order to better consolidate the group, 1841 * multiple NUMA nodes; in order to better consolidate the group,
1808 * we need to check other locations. 1842 * we need to check other locations.
1809 */ 1843 */
1810 if (env.best_cpu == -1 || (p->numa_group && p->numa_group->active_nodes > 1)) { 1844 ng = deref_curr_numa_group(p);
1845 if (env.best_cpu == -1 || (ng && ng->active_nodes > 1)) {
1811 for_each_online_node(nid) { 1846 for_each_online_node(nid) {
1812 if (nid == env.src_nid || nid == p->numa_preferred_nid) 1847 if (nid == env.src_nid || nid == p->numa_preferred_nid)
1813 continue; 1848 continue;
@@ -1840,7 +1875,7 @@ static int task_numa_migrate(struct task_struct *p)
1840 * A task that migrated to a second choice node will be better off 1875 * A task that migrated to a second choice node will be better off
1841 * trying for a better one later. Do not set the preferred node here. 1876 * trying for a better one later. Do not set the preferred node here.
1842 */ 1877 */
1843 if (p->numa_group) { 1878 if (ng) {
1844 if (env.best_cpu == -1) 1879 if (env.best_cpu == -1)
1845 nid = env.src_nid; 1880 nid = env.src_nid;
1846 else 1881 else
@@ -2135,6 +2170,7 @@ static void task_numa_placement(struct task_struct *p)
2135 unsigned long total_faults; 2170 unsigned long total_faults;
2136 u64 runtime, period; 2171 u64 runtime, period;
2137 spinlock_t *group_lock = NULL; 2172 spinlock_t *group_lock = NULL;
2173 struct numa_group *ng;
2138 2174
2139 /* 2175 /*
2140 * The p->mm->numa_scan_seq field gets updated without 2176 * The p->mm->numa_scan_seq field gets updated without
@@ -2152,8 +2188,9 @@ static void task_numa_placement(struct task_struct *p)
2152 runtime = numa_get_avg_runtime(p, &period); 2188 runtime = numa_get_avg_runtime(p, &period);
2153 2189
2154 /* If the task is part of a group prevent parallel updates to group stats */ 2190 /* If the task is part of a group prevent parallel updates to group stats */
2155 if (p->numa_group) { 2191 ng = deref_curr_numa_group(p);
2156 group_lock = &p->numa_group->lock; 2192 if (ng) {
2193 group_lock = &ng->lock;
2157 spin_lock_irq(group_lock); 2194 spin_lock_irq(group_lock);
2158 } 2195 }
2159 2196
@@ -2194,7 +2231,7 @@ static void task_numa_placement(struct task_struct *p)
2194 p->numa_faults[cpu_idx] += f_diff; 2231 p->numa_faults[cpu_idx] += f_diff;
2195 faults += p->numa_faults[mem_idx]; 2232 faults += p->numa_faults[mem_idx];
2196 p->total_numa_faults += diff; 2233 p->total_numa_faults += diff;
2197 if (p->numa_group) { 2234 if (ng) {
2198 /* 2235 /*
2199 * safe because we can only change our own group 2236 * safe because we can only change our own group
2200 * 2237 *
@@ -2202,14 +2239,14 @@ static void task_numa_placement(struct task_struct *p)
2202 * nid and priv in a specific region because it 2239 * nid and priv in a specific region because it
2203 * is at the beginning of the numa_faults array. 2240 * is at the beginning of the numa_faults array.
2204 */ 2241 */
2205 p->numa_group->faults[mem_idx] += diff; 2242 ng->faults[mem_idx] += diff;
2206 p->numa_group->faults_cpu[mem_idx] += f_diff; 2243 ng->faults_cpu[mem_idx] += f_diff;
2207 p->numa_group->total_faults += diff; 2244 ng->total_faults += diff;
2208 group_faults += p->numa_group->faults[mem_idx]; 2245 group_faults += ng->faults[mem_idx];
2209 } 2246 }
2210 } 2247 }
2211 2248
2212 if (!p->numa_group) { 2249 if (!ng) {
2213 if (faults > max_faults) { 2250 if (faults > max_faults) {
2214 max_faults = faults; 2251 max_faults = faults;
2215 max_nid = nid; 2252 max_nid = nid;
@@ -2220,8 +2257,8 @@ static void task_numa_placement(struct task_struct *p)
2220 } 2257 }
2221 } 2258 }
2222 2259
2223 if (p->numa_group) { 2260 if (ng) {
2224 numa_group_count_active_nodes(p->numa_group); 2261 numa_group_count_active_nodes(ng);
2225 spin_unlock_irq(group_lock); 2262 spin_unlock_irq(group_lock);
2226 max_nid = preferred_group_nid(p, max_nid); 2263 max_nid = preferred_group_nid(p, max_nid);
2227 } 2264 }
@@ -2255,7 +2292,7 @@ static void task_numa_group(struct task_struct *p, int cpupid, int flags,
2255 int cpu = cpupid_to_cpu(cpupid); 2292 int cpu = cpupid_to_cpu(cpupid);
2256 int i; 2293 int i;
2257 2294
2258 if (unlikely(!p->numa_group)) { 2295 if (unlikely(!deref_curr_numa_group(p))) {
2259 unsigned int size = sizeof(struct numa_group) + 2296 unsigned int size = sizeof(struct numa_group) +
2260 4*nr_node_ids*sizeof(unsigned long); 2297 4*nr_node_ids*sizeof(unsigned long);
2261 2298
@@ -2291,7 +2328,7 @@ static void task_numa_group(struct task_struct *p, int cpupid, int flags,
2291 if (!grp) 2328 if (!grp)
2292 goto no_join; 2329 goto no_join;
2293 2330
2294 my_grp = p->numa_group; 2331 my_grp = deref_curr_numa_group(p);
2295 if (grp == my_grp) 2332 if (grp == my_grp)
2296 goto no_join; 2333 goto no_join;
2297 2334
@@ -2353,13 +2390,24 @@ no_join:
2353 return; 2390 return;
2354} 2391}
2355 2392
2356void task_numa_free(struct task_struct *p) 2393/*
2394 * Get rid of NUMA staticstics associated with a task (either current or dead).
2395 * If @final is set, the task is dead and has reached refcount zero, so we can
2396 * safely free all relevant data structures. Otherwise, there might be
2397 * concurrent reads from places like load balancing and procfs, and we should
2398 * reset the data back to default state without freeing ->numa_faults.
2399 */
2400void task_numa_free(struct task_struct *p, bool final)
2357{ 2401{
2358 struct numa_group *grp = p->numa_group; 2402 /* safe: p either is current or is being freed by current */
2359 void *numa_faults = p->numa_faults; 2403 struct numa_group *grp = rcu_dereference_raw(p->numa_group);
2404 unsigned long *numa_faults = p->numa_faults;
2360 unsigned long flags; 2405 unsigned long flags;
2361 int i; 2406 int i;
2362 2407
2408 if (!numa_faults)
2409 return;
2410
2363 if (grp) { 2411 if (grp) {
2364 spin_lock_irqsave(&grp->lock, flags); 2412 spin_lock_irqsave(&grp->lock, flags);
2365 for (i = 0; i < NR_NUMA_HINT_FAULT_STATS * nr_node_ids; i++) 2413 for (i = 0; i < NR_NUMA_HINT_FAULT_STATS * nr_node_ids; i++)
@@ -2372,8 +2420,14 @@ void task_numa_free(struct task_struct *p)
2372 put_numa_group(grp); 2420 put_numa_group(grp);
2373 } 2421 }
2374 2422
2375 p->numa_faults = NULL; 2423 if (final) {
2376 kfree(numa_faults); 2424 p->numa_faults = NULL;
2425 kfree(numa_faults);
2426 } else {
2427 p->total_numa_faults = 0;
2428 for (i = 0; i < NR_NUMA_HINT_FAULT_STATS * nr_node_ids; i++)
2429 numa_faults[i] = 0;
2430 }
2377} 2431}
2378 2432
2379/* 2433/*
@@ -2426,7 +2480,7 @@ void task_numa_fault(int last_cpupid, int mem_node, int pages, int flags)
2426 * actively using should be counted as local. This allows the 2480 * actively using should be counted as local. This allows the
2427 * scan rate to slow down when a workload has settled down. 2481 * scan rate to slow down when a workload has settled down.
2428 */ 2482 */
2429 ng = p->numa_group; 2483 ng = deref_curr_numa_group(p);
2430 if (!priv && !local && ng && ng->active_nodes > 1 && 2484 if (!priv && !local && ng && ng->active_nodes > 1 &&
2431 numa_is_active_node(cpu_node, ng) && 2485 numa_is_active_node(cpu_node, ng) &&
2432 numa_is_active_node(mem_node, ng)) 2486 numa_is_active_node(mem_node, ng))
@@ -10444,18 +10498,22 @@ void show_numa_stats(struct task_struct *p, struct seq_file *m)
10444{ 10498{
10445 int node; 10499 int node;
10446 unsigned long tsf = 0, tpf = 0, gsf = 0, gpf = 0; 10500 unsigned long tsf = 0, tpf = 0, gsf = 0, gpf = 0;
10501 struct numa_group *ng;
10447 10502
10503 rcu_read_lock();
10504 ng = rcu_dereference(p->numa_group);
10448 for_each_online_node(node) { 10505 for_each_online_node(node) {
10449 if (p->numa_faults) { 10506 if (p->numa_faults) {
10450 tsf = p->numa_faults[task_faults_idx(NUMA_MEM, node, 0)]; 10507 tsf = p->numa_faults[task_faults_idx(NUMA_MEM, node, 0)];
10451 tpf = p->numa_faults[task_faults_idx(NUMA_MEM, node, 1)]; 10508 tpf = p->numa_faults[task_faults_idx(NUMA_MEM, node, 1)];
10452 } 10509 }
10453 if (p->numa_group) { 10510 if (ng) {
10454 gsf = p->numa_group->faults[task_faults_idx(NUMA_MEM, node, 0)], 10511 gsf = ng->faults[task_faults_idx(NUMA_MEM, node, 0)],
10455 gpf = p->numa_group->faults[task_faults_idx(NUMA_MEM, node, 1)]; 10512 gpf = ng->faults[task_faults_idx(NUMA_MEM, node, 1)];
10456 } 10513 }
10457 print_numa_stats(m, node, tsf, tpf, gsf, gpf); 10514 print_numa_stats(m, node, tsf, tpf, gsf, gpf);
10458 } 10515 }
10516 rcu_read_unlock();
10459} 10517}
10460#endif /* CONFIG_NUMA_BALANCING */ 10518#endif /* CONFIG_NUMA_BALANCING */
10461#endif /* CONFIG_SCHED_DEBUG */ 10519#endif /* CONFIG_SCHED_DEBUG */
diff --git a/kernel/sched/psi.c b/kernel/sched/psi.c
index 7acc632c3b82..6e52b67b420e 100644
--- a/kernel/sched/psi.c
+++ b/kernel/sched/psi.c
@@ -1051,7 +1051,7 @@ struct psi_trigger *psi_trigger_create(struct psi_group *group,
1051 1051
1052 if (!rcu_access_pointer(group->poll_kworker)) { 1052 if (!rcu_access_pointer(group->poll_kworker)) {
1053 struct sched_param param = { 1053 struct sched_param param = {
1054 .sched_priority = MAX_RT_PRIO - 1, 1054 .sched_priority = 1,
1055 }; 1055 };
1056 struct kthread_worker *kworker; 1056 struct kthread_worker *kworker;
1057 1057
@@ -1061,7 +1061,7 @@ struct psi_trigger *psi_trigger_create(struct psi_group *group,
1061 mutex_unlock(&group->trigger_lock); 1061 mutex_unlock(&group->trigger_lock);
1062 return ERR_CAST(kworker); 1062 return ERR_CAST(kworker);
1063 } 1063 }
1064 sched_setscheduler(kworker->task, SCHED_FIFO, &param); 1064 sched_setscheduler_nocheck(kworker->task, SCHED_FIFO, &param);
1065 kthread_init_delayed_work(&group->poll_work, 1065 kthread_init_delayed_work(&group->poll_work,
1066 psi_poll_work); 1066 psi_poll_work);
1067 rcu_assign_pointer(group->poll_kworker, kworker); 1067 rcu_assign_pointer(group->poll_kworker, kworker);
@@ -1131,7 +1131,15 @@ static void psi_trigger_destroy(struct kref *ref)
1131 * deadlock while waiting for psi_poll_work to acquire trigger_lock 1131 * deadlock while waiting for psi_poll_work to acquire trigger_lock
1132 */ 1132 */
1133 if (kworker_to_destroy) { 1133 if (kworker_to_destroy) {
1134 /*
1135 * After the RCU grace period has expired, the worker
1136 * can no longer be found through group->poll_kworker.
1137 * But it might have been already scheduled before
1138 * that - deschedule it cleanly before destroying it.
1139 */
1134 kthread_cancel_delayed_work_sync(&group->poll_work); 1140 kthread_cancel_delayed_work_sync(&group->poll_work);
1141 atomic_set(&group->poll_scheduled, 0);
1142
1135 kthread_destroy_worker(kworker_to_destroy); 1143 kthread_destroy_worker(kworker_to_destroy);
1136 } 1144 }
1137 kfree(t); 1145 kfree(t);
diff --git a/kernel/signal.c b/kernel/signal.c
index 91b789dd6e72..534fec266a33 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -90,6 +90,11 @@ static bool sig_task_ignored(struct task_struct *t, int sig, bool force)
90 handler == SIG_DFL && !(force && sig_kernel_only(sig))) 90 handler == SIG_DFL && !(force && sig_kernel_only(sig)))
91 return true; 91 return true;
92 92
93 /* Only allow kernel generated signals to this kthread */
94 if (unlikely((t->flags & PF_KTHREAD) &&
95 (handler == SIG_KTHREAD_KERNEL) && !force))
96 return true;
97
93 return sig_handler_ignored(handler, sig); 98 return sig_handler_ignored(handler, sig);
94} 99}
95 100
@@ -349,7 +354,7 @@ void task_clear_jobctl_pending(struct task_struct *task, unsigned long mask)
349 * @task has %JOBCTL_STOP_PENDING set and is participating in a group stop. 354 * @task has %JOBCTL_STOP_PENDING set and is participating in a group stop.
350 * Group stop states are cleared and the group stop count is consumed if 355 * Group stop states are cleared and the group stop count is consumed if
351 * %JOBCTL_STOP_CONSUME was set. If the consumption completes the group 356 * %JOBCTL_STOP_CONSUME was set. If the consumption completes the group
352 * stop, the appropriate %SIGNAL_* flags are set. 357 * stop, the appropriate `SIGNAL_*` flags are set.
353 * 358 *
354 * CONTEXT: 359 * CONTEXT:
355 * Must be called with @task->sighand->siglock held. 360 * Must be called with @task->sighand->siglock held.
@@ -1885,6 +1890,7 @@ static void do_notify_pidfd(struct task_struct *task)
1885{ 1890{
1886 struct pid *pid; 1891 struct pid *pid;
1887 1892
1893 WARN_ON(task->exit_state == 0);
1888 pid = task_pid(task); 1894 pid = task_pid(task);
1889 wake_up_all(&pid->wait_pidfd); 1895 wake_up_all(&pid->wait_pidfd);
1890} 1896}
diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
index d911c8470149..ca69290bee2a 100644
--- a/kernel/time/timekeeping.c
+++ b/kernel/time/timekeeping.c
@@ -146,6 +146,11 @@ static void tk_set_wall_to_mono(struct timekeeper *tk, struct timespec64 wtm)
146static inline void tk_update_sleep_time(struct timekeeper *tk, ktime_t delta) 146static inline void tk_update_sleep_time(struct timekeeper *tk, ktime_t delta)
147{ 147{
148 tk->offs_boot = ktime_add(tk->offs_boot, delta); 148 tk->offs_boot = ktime_add(tk->offs_boot, delta);
149 /*
150 * Timespec representation for VDSO update to avoid 64bit division
151 * on every update.
152 */
153 tk->monotonic_to_boot = ktime_to_timespec64(tk->offs_boot);
149} 154}
150 155
151/* 156/*
diff --git a/kernel/time/vsyscall.c b/kernel/time/vsyscall.c
index 8cf3596a4ce6..4bc37ac3bb05 100644
--- a/kernel/time/vsyscall.c
+++ b/kernel/time/vsyscall.c
@@ -17,7 +17,7 @@ static inline void update_vdso_data(struct vdso_data *vdata,
17 struct timekeeper *tk) 17 struct timekeeper *tk)
18{ 18{
19 struct vdso_timestamp *vdso_ts; 19 struct vdso_timestamp *vdso_ts;
20 u64 nsec; 20 u64 nsec, sec;
21 21
22 vdata[CS_HRES_COARSE].cycle_last = tk->tkr_mono.cycle_last; 22 vdata[CS_HRES_COARSE].cycle_last = tk->tkr_mono.cycle_last;
23 vdata[CS_HRES_COARSE].mask = tk->tkr_mono.mask; 23 vdata[CS_HRES_COARSE].mask = tk->tkr_mono.mask;
@@ -45,23 +45,27 @@ static inline void update_vdso_data(struct vdso_data *vdata,
45 } 45 }
46 vdso_ts->nsec = nsec; 46 vdso_ts->nsec = nsec;
47 47
48 /* CLOCK_MONOTONIC_RAW */ 48 /* Copy MONOTONIC time for BOOTTIME */
49 vdso_ts = &vdata[CS_RAW].basetime[CLOCK_MONOTONIC_RAW]; 49 sec = vdso_ts->sec;
50 vdso_ts->sec = tk->raw_sec; 50 /* Add the boot offset */
51 vdso_ts->nsec = tk->tkr_raw.xtime_nsec; 51 sec += tk->monotonic_to_boot.tv_sec;
52 nsec += (u64)tk->monotonic_to_boot.tv_nsec << tk->tkr_mono.shift;
52 53
53 /* CLOCK_BOOTTIME */ 54 /* CLOCK_BOOTTIME */
54 vdso_ts = &vdata[CS_HRES_COARSE].basetime[CLOCK_BOOTTIME]; 55 vdso_ts = &vdata[CS_HRES_COARSE].basetime[CLOCK_BOOTTIME];
55 vdso_ts->sec = tk->xtime_sec + tk->wall_to_monotonic.tv_sec; 56 vdso_ts->sec = sec;
56 nsec = tk->tkr_mono.xtime_nsec; 57
57 nsec += ((u64)(tk->wall_to_monotonic.tv_nsec +
58 ktime_to_ns(tk->offs_boot)) << tk->tkr_mono.shift);
59 while (nsec >= (((u64)NSEC_PER_SEC) << tk->tkr_mono.shift)) { 58 while (nsec >= (((u64)NSEC_PER_SEC) << tk->tkr_mono.shift)) {
60 nsec -= (((u64)NSEC_PER_SEC) << tk->tkr_mono.shift); 59 nsec -= (((u64)NSEC_PER_SEC) << tk->tkr_mono.shift);
61 vdso_ts->sec++; 60 vdso_ts->sec++;
62 } 61 }
63 vdso_ts->nsec = nsec; 62 vdso_ts->nsec = nsec;
64 63
64 /* CLOCK_MONOTONIC_RAW */
65 vdso_ts = &vdata[CS_RAW].basetime[CLOCK_MONOTONIC_RAW];
66 vdso_ts->sec = tk->raw_sec;
67 vdso_ts->nsec = tk->tkr_raw.xtime_nsec;
68
65 /* CLOCK_TAI */ 69 /* CLOCK_TAI */
66 vdso_ts = &vdata[CS_HRES_COARSE].basetime[CLOCK_TAI]; 70 vdso_ts = &vdata[CS_HRES_COARSE].basetime[CLOCK_TAI];
67 vdso_ts->sec = tk->xtime_sec + (s64)tk->tai_offset; 71 vdso_ts->sec = tk->xtime_sec + (s64)tk->tai_offset;
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index eca34503f178..f9821a3374e9 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -3095,6 +3095,14 @@ t_probe_next(struct seq_file *m, loff_t *pos)
3095 hnd = &iter->probe_entry->hlist; 3095 hnd = &iter->probe_entry->hlist;
3096 3096
3097 hash = iter->probe->ops.func_hash->filter_hash; 3097 hash = iter->probe->ops.func_hash->filter_hash;
3098
3099 /*
3100 * A probe being registered may temporarily have an empty hash
3101 * and it's at the end of the func_probes list.
3102 */
3103 if (!hash || hash == EMPTY_HASH)
3104 return NULL;
3105
3098 size = 1 << hash->size_bits; 3106 size = 1 << hash->size_bits;
3099 3107
3100 retry: 3108 retry:
@@ -4320,12 +4328,21 @@ register_ftrace_function_probe(char *glob, struct trace_array *tr,
4320 4328
4321 mutex_unlock(&ftrace_lock); 4329 mutex_unlock(&ftrace_lock);
4322 4330
4331 /*
4332 * Note, there's a small window here that the func_hash->filter_hash
 4333 * may be NULL or empty. Need to be careful when reading the loop.
4334 */
4323 mutex_lock(&probe->ops.func_hash->regex_lock); 4335 mutex_lock(&probe->ops.func_hash->regex_lock);
4324 4336
4325 orig_hash = &probe->ops.func_hash->filter_hash; 4337 orig_hash = &probe->ops.func_hash->filter_hash;
4326 old_hash = *orig_hash; 4338 old_hash = *orig_hash;
4327 hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, old_hash); 4339 hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, old_hash);
4328 4340
4341 if (!hash) {
4342 ret = -ENOMEM;
4343 goto out;
4344 }
4345
4329 ret = ftrace_match_records(hash, glob, strlen(glob)); 4346 ret = ftrace_match_records(hash, glob, strlen(glob));
4330 4347
4331 /* Nothing found? */ 4348 /* Nothing found? */
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 525a97fbbc60..563e80f9006a 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -1567,9 +1567,9 @@ update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu,
1567 1567
1568/** 1568/**
1569 * update_max_tr_single - only copy one trace over, and reset the rest 1569 * update_max_tr_single - only copy one trace over, and reset the rest
1570 * @tr - tracer 1570 * @tr: tracer
1571 * @tsk - task with the latency 1571 * @tsk: task with the latency
1572 * @cpu - the cpu of the buffer to copy. 1572 * @cpu: the cpu of the buffer to copy.
1573 * 1573 *
1574 * Flip the trace of a single CPU buffer between the @tr and the max_tr. 1574 * Flip the trace of a single CPU buffer between the @tr and the max_tr.
1575 */ 1575 */
@@ -1767,7 +1767,7 @@ static void __init apply_trace_boot_options(void);
1767 1767
1768/** 1768/**
1769 * register_tracer - register a tracer with the ftrace system. 1769 * register_tracer - register a tracer with the ftrace system.
1770 * @type - the plugin for the tracer 1770 * @type: the plugin for the tracer
1771 * 1771 *
1772 * Register a new plugin tracer. 1772 * Register a new plugin tracer.
1773 */ 1773 */
@@ -2230,9 +2230,9 @@ static bool tracing_record_taskinfo_skip(int flags)
2230/** 2230/**
2231 * tracing_record_taskinfo - record the task info of a task 2231 * tracing_record_taskinfo - record the task info of a task
2232 * 2232 *
2233 * @task - task to record 2233 * @task: task to record
2234 * @flags - TRACE_RECORD_CMDLINE for recording comm 2234 * @flags: TRACE_RECORD_CMDLINE for recording comm
2235 * - TRACE_RECORD_TGID for recording tgid 2235 * TRACE_RECORD_TGID for recording tgid
2236 */ 2236 */
2237void tracing_record_taskinfo(struct task_struct *task, int flags) 2237void tracing_record_taskinfo(struct task_struct *task, int flags)
2238{ 2238{
@@ -2258,10 +2258,10 @@ void tracing_record_taskinfo(struct task_struct *task, int flags)
2258/** 2258/**
2259 * tracing_record_taskinfo_sched_switch - record task info for sched_switch 2259 * tracing_record_taskinfo_sched_switch - record task info for sched_switch
2260 * 2260 *
2261 * @prev - previous task during sched_switch 2261 * @prev: previous task during sched_switch
2262 * @next - next task during sched_switch 2262 * @next: next task during sched_switch
2263 * @flags - TRACE_RECORD_CMDLINE for recording comm 2263 * @flags: TRACE_RECORD_CMDLINE for recording comm
2264 * TRACE_RECORD_TGID for recording tgid 2264 * TRACE_RECORD_TGID for recording tgid
2265 */ 2265 */
2266void tracing_record_taskinfo_sched_switch(struct task_struct *prev, 2266void tracing_record_taskinfo_sched_switch(struct task_struct *prev,
2267 struct task_struct *next, int flags) 2267 struct task_struct *next, int flags)
@@ -3072,7 +3072,9 @@ static void trace_printk_start_stop_comm(int enabled)
3072 3072
3073/** 3073/**
3074 * trace_vbprintk - write binary msg to tracing buffer 3074 * trace_vbprintk - write binary msg to tracing buffer
3075 * 3075 * @ip: The address of the caller
3076 * @fmt: The string format to write to the buffer
3077 * @args: Arguments for @fmt
3076 */ 3078 */
3077int trace_vbprintk(unsigned long ip, const char *fmt, va_list args) 3079int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
3078{ 3080{
diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
index c7506bc81b75..648930823b57 100644
--- a/kernel/trace/trace_events.c
+++ b/kernel/trace/trace_events.c
@@ -787,7 +787,7 @@ static int __ftrace_set_clr_event(struct trace_array *tr, const char *match,
787 return ret; 787 return ret;
788} 788}
789 789
790static int ftrace_set_clr_event(struct trace_array *tr, char *buf, int set) 790int ftrace_set_clr_event(struct trace_array *tr, char *buf, int set)
791{ 791{
792 char *event = NULL, *sub = NULL, *match; 792 char *event = NULL, *sub = NULL, *match;
793 int ret; 793 int ret;
diff --git a/kernel/trace/trace_functions_graph.c b/kernel/trace/trace_functions_graph.c
index 69ebf3c2f1b5..78af97163147 100644
--- a/kernel/trace/trace_functions_graph.c
+++ b/kernel/trace/trace_functions_graph.c
@@ -137,6 +137,13 @@ int trace_graph_entry(struct ftrace_graph_ent *trace)
137 if (trace_recursion_test(TRACE_GRAPH_NOTRACE_BIT)) 137 if (trace_recursion_test(TRACE_GRAPH_NOTRACE_BIT))
138 return 0; 138 return 0;
139 139
140 /*
141 * Do not trace a function if it's filtered by set_graph_notrace.
142 * Make the index of ret stack negative to indicate that it should
143 * ignore further functions. But it needs its own ret stack entry
144 * to recover the original index in order to continue tracing after
145 * returning from the function.
146 */
140 if (ftrace_graph_notrace_addr(trace->func)) { 147 if (ftrace_graph_notrace_addr(trace->func)) {
141 trace_recursion_set(TRACE_GRAPH_NOTRACE_BIT); 148 trace_recursion_set(TRACE_GRAPH_NOTRACE_BIT);
142 /* 149 /*
@@ -156,16 +163,6 @@ int trace_graph_entry(struct ftrace_graph_ent *trace)
156 return 0; 163 return 0;
157 164
158 /* 165 /*
159 * Do not trace a function if it's filtered by set_graph_notrace.
160 * Make the index of ret stack negative to indicate that it should
161 * ignore further functions. But it needs its own ret stack entry
162 * to recover the original index in order to continue tracing after
163 * returning from the function.
164 */
165 if (ftrace_graph_notrace_addr(trace->func))
166 return 1;
167
168 /*
169 * Stop here if tracing_threshold is set. We only write function return 166 * Stop here if tracing_threshold is set. We only write function return
170 * events to the ring buffer. 167 * events to the ring buffer.
171 */ 168 */
diff --git a/kernel/trace/trace_probe.c b/kernel/trace/trace_probe.c
index dbef0d135075..fb6bfbc5bf86 100644
--- a/kernel/trace/trace_probe.c
+++ b/kernel/trace/trace_probe.c
@@ -895,7 +895,8 @@ void trace_probe_cleanup(struct trace_probe *tp)
895 for (i = 0; i < tp->nr_args; i++) 895 for (i = 0; i < tp->nr_args; i++)
896 traceprobe_free_probe_arg(&tp->args[i]); 896 traceprobe_free_probe_arg(&tp->args[i]);
897 897
898 kfree(call->class->system); 898 if (call->class)
899 kfree(call->class->system);
899 kfree(call->name); 900 kfree(call->name);
900 kfree(call->print_fmt); 901 kfree(call->print_fmt);
901} 902}
diff --git a/lib/Kconfig.kasan b/lib/Kconfig.kasan
index 4fafba1a923b..7fa97a8b5717 100644
--- a/lib/Kconfig.kasan
+++ b/lib/Kconfig.kasan
@@ -106,7 +106,6 @@ endchoice
106 106
107config KASAN_STACK_ENABLE 107config KASAN_STACK_ENABLE
108 bool "Enable stack instrumentation (unsafe)" if CC_IS_CLANG && !COMPILE_TEST 108 bool "Enable stack instrumentation (unsafe)" if CC_IS_CLANG && !COMPILE_TEST
109 default !(CLANG_VERSION < 90000)
110 depends on KASAN 109 depends on KASAN
111 help 110 help
 112 The LLVM stack address sanitizer has a known problem that 111 The LLVM stack address sanitizer has a known problem that
@@ -115,11 +114,11 @@ config KASAN_STACK_ENABLE
115 Disabling asan-stack makes it safe to run kernels build 114 Disabling asan-stack makes it safe to run kernels build
116 with clang-8 with KASAN enabled, though it loses some of 115 with clang-8 with KASAN enabled, though it loses some of
117 the functionality. 116 the functionality.
118 This feature is always disabled when compile-testing with clang-8 117 This feature is always disabled when compile-testing with clang
119 or earlier to avoid cluttering the output in stack overflow 118 to avoid cluttering the output in stack overflow warnings,
120 warnings, but clang-8 users can still enable it for builds without 119 but clang users can still enable it for builds without
121 CONFIG_COMPILE_TEST. On gcc and later clang versions it is 120 CONFIG_COMPILE_TEST. On gcc it is assumed to always be safe
122 assumed to always be safe to use and enabled by default. 121 to use and enabled by default.
123 122
124config KASAN_STACK 123config KASAN_STACK
125 int 124 int
diff --git a/lib/Makefile b/lib/Makefile
index 095601ce371d..29c02a924973 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -279,7 +279,8 @@ obj-$(CONFIG_UCS2_STRING) += ucs2_string.o
279obj-$(CONFIG_UBSAN) += ubsan.o 279obj-$(CONFIG_UBSAN) += ubsan.o
280 280
281UBSAN_SANITIZE_ubsan.o := n 281UBSAN_SANITIZE_ubsan.o := n
282CFLAGS_ubsan.o := $(call cc-option, -fno-conserve-stack -fno-stack-protector) 282KASAN_SANITIZE_ubsan.o := n
283CFLAGS_ubsan.o := $(call cc-option, -fno-stack-protector) $(DISABLE_STACKLEAK_PLUGIN)
283 284
284obj-$(CONFIG_SBITMAP) += sbitmap.o 285obj-$(CONFIG_SBITMAP) += sbitmap.o
285 286
diff --git a/lib/dim/dim.c b/lib/dim/dim.c
index 439d641ec796..38045d6d0538 100644
--- a/lib/dim/dim.c
+++ b/lib/dim/dim.c
@@ -74,8 +74,8 @@ void dim_calc_stats(struct dim_sample *start, struct dim_sample *end,
74 delta_us); 74 delta_us);
75 curr_stats->cpms = DIV_ROUND_UP(ncomps * USEC_PER_MSEC, delta_us); 75 curr_stats->cpms = DIV_ROUND_UP(ncomps * USEC_PER_MSEC, delta_us);
76 if (curr_stats->epms != 0) 76 if (curr_stats->epms != 0)
77 curr_stats->cpe_ratio = 77 curr_stats->cpe_ratio = DIV_ROUND_DOWN_ULL(
78 (curr_stats->cpms * 100) / curr_stats->epms; 78 curr_stats->cpms * 100, curr_stats->epms);
79 else 79 else
80 curr_stats->cpe_ratio = 0; 80 curr_stats->cpe_ratio = 0;
81 81
diff --git a/lib/dim/net_dim.c b/lib/dim/net_dim.c
index 5bcc902c5388..a4db51c21266 100644
--- a/lib/dim/net_dim.c
+++ b/lib/dim/net_dim.c
@@ -5,6 +5,62 @@
5 5
6#include <linux/dim.h> 6#include <linux/dim.h>
7 7
8/*
9 * Net DIM profiles:
10 * There are different set of profiles for each CQ period mode.
11 * There are different set of profiles for RX/TX CQs.
12 * Each profile size must be of NET_DIM_PARAMS_NUM_PROFILES
13 */
14#define NET_DIM_PARAMS_NUM_PROFILES 5
15#define NET_DIM_DEFAULT_RX_CQ_MODERATION_PKTS_FROM_EQE 256
16#define NET_DIM_DEFAULT_TX_CQ_MODERATION_PKTS_FROM_EQE 128
17#define NET_DIM_DEF_PROFILE_CQE 1
18#define NET_DIM_DEF_PROFILE_EQE 1
19
20#define NET_DIM_RX_EQE_PROFILES { \
21 {1, NET_DIM_DEFAULT_RX_CQ_MODERATION_PKTS_FROM_EQE}, \
22 {8, NET_DIM_DEFAULT_RX_CQ_MODERATION_PKTS_FROM_EQE}, \
23 {64, NET_DIM_DEFAULT_RX_CQ_MODERATION_PKTS_FROM_EQE}, \
24 {128, NET_DIM_DEFAULT_RX_CQ_MODERATION_PKTS_FROM_EQE}, \
25 {256, NET_DIM_DEFAULT_RX_CQ_MODERATION_PKTS_FROM_EQE}, \
26}
27
28#define NET_DIM_RX_CQE_PROFILES { \
29 {2, 256}, \
30 {8, 128}, \
31 {16, 64}, \
32 {32, 64}, \
33 {64, 64} \
34}
35
36#define NET_DIM_TX_EQE_PROFILES { \
37 {1, NET_DIM_DEFAULT_TX_CQ_MODERATION_PKTS_FROM_EQE}, \
38 {8, NET_DIM_DEFAULT_TX_CQ_MODERATION_PKTS_FROM_EQE}, \
39 {32, NET_DIM_DEFAULT_TX_CQ_MODERATION_PKTS_FROM_EQE}, \
40 {64, NET_DIM_DEFAULT_TX_CQ_MODERATION_PKTS_FROM_EQE}, \
41 {128, NET_DIM_DEFAULT_TX_CQ_MODERATION_PKTS_FROM_EQE} \
42}
43
44#define NET_DIM_TX_CQE_PROFILES { \
45 {5, 128}, \
46 {8, 64}, \
47 {16, 32}, \
48 {32, 32}, \
49 {64, 32} \
50}
51
52static const struct dim_cq_moder
53rx_profile[DIM_CQ_PERIOD_NUM_MODES][NET_DIM_PARAMS_NUM_PROFILES] = {
54 NET_DIM_RX_EQE_PROFILES,
55 NET_DIM_RX_CQE_PROFILES,
56};
57
58static const struct dim_cq_moder
59tx_profile[DIM_CQ_PERIOD_NUM_MODES][NET_DIM_PARAMS_NUM_PROFILES] = {
60 NET_DIM_TX_EQE_PROFILES,
61 NET_DIM_TX_CQE_PROFILES,
62};
63
8struct dim_cq_moder 64struct dim_cq_moder
9net_dim_get_rx_moderation(u8 cq_period_mode, int ix) 65net_dim_get_rx_moderation(u8 cq_period_mode, int ix)
10{ 66{
diff --git a/lib/kfifo.c b/lib/kfifo.c
index 117ad0e7fbf4..70dab9ac7827 100644
--- a/lib/kfifo.c
+++ b/lib/kfifo.c
@@ -68,7 +68,8 @@ int __kfifo_init(struct __kfifo *fifo, void *buffer,
68{ 68{
69 size /= esize; 69 size /= esize;
70 70
71 size = roundup_pow_of_two(size); 71 if (!is_power_of_2(size))
72 size = rounddown_pow_of_two(size);
72 73
73 fifo->in = 0; 74 fifo->in = 0;
74 fifo->out = 0; 75 fifo->out = 0;
diff --git a/lib/logic_pio.c b/lib/logic_pio.c
index feea48fd1a0d..905027574e5d 100644
--- a/lib/logic_pio.c
+++ b/lib/logic_pio.c
@@ -35,7 +35,7 @@ int logic_pio_register_range(struct logic_pio_hwaddr *new_range)
35 struct logic_pio_hwaddr *range; 35 struct logic_pio_hwaddr *range;
36 resource_size_t start; 36 resource_size_t start;
37 resource_size_t end; 37 resource_size_t end;
38 resource_size_t mmio_sz = 0; 38 resource_size_t mmio_end = 0;
39 resource_size_t iio_sz = MMIO_UPPER_LIMIT; 39 resource_size_t iio_sz = MMIO_UPPER_LIMIT;
40 int ret = 0; 40 int ret = 0;
41 41
@@ -46,7 +46,7 @@ int logic_pio_register_range(struct logic_pio_hwaddr *new_range)
46 end = new_range->hw_start + new_range->size; 46 end = new_range->hw_start + new_range->size;
47 47
48 mutex_lock(&io_range_mutex); 48 mutex_lock(&io_range_mutex);
49 list_for_each_entry_rcu(range, &io_range_list, list) { 49 list_for_each_entry(range, &io_range_list, list) {
50 if (range->fwnode == new_range->fwnode) { 50 if (range->fwnode == new_range->fwnode) {
51 /* range already there */ 51 /* range already there */
52 goto end_register; 52 goto end_register;
@@ -56,7 +56,7 @@ int logic_pio_register_range(struct logic_pio_hwaddr *new_range)
56 /* for MMIO ranges we need to check for overlap */ 56 /* for MMIO ranges we need to check for overlap */
57 if (start >= range->hw_start + range->size || 57 if (start >= range->hw_start + range->size ||
58 end < range->hw_start) { 58 end < range->hw_start) {
59 mmio_sz += range->size; 59 mmio_end = range->io_start + range->size;
60 } else { 60 } else {
61 ret = -EFAULT; 61 ret = -EFAULT;
62 goto end_register; 62 goto end_register;
@@ -69,16 +69,16 @@ int logic_pio_register_range(struct logic_pio_hwaddr *new_range)
69 69
70 /* range not registered yet, check for available space */ 70 /* range not registered yet, check for available space */
71 if (new_range->flags == LOGIC_PIO_CPU_MMIO) { 71 if (new_range->flags == LOGIC_PIO_CPU_MMIO) {
72 if (mmio_sz + new_range->size - 1 > MMIO_UPPER_LIMIT) { 72 if (mmio_end + new_range->size - 1 > MMIO_UPPER_LIMIT) {
73 /* if it's too big check if 64K space can be reserved */ 73 /* if it's too big check if 64K space can be reserved */
74 if (mmio_sz + SZ_64K - 1 > MMIO_UPPER_LIMIT) { 74 if (mmio_end + SZ_64K - 1 > MMIO_UPPER_LIMIT) {
75 ret = -E2BIG; 75 ret = -E2BIG;
76 goto end_register; 76 goto end_register;
77 } 77 }
78 new_range->size = SZ_64K; 78 new_range->size = SZ_64K;
79 pr_warn("Requested IO range too big, new size set to 64K\n"); 79 pr_warn("Requested IO range too big, new size set to 64K\n");
80 } 80 }
81 new_range->io_start = mmio_sz; 81 new_range->io_start = mmio_end;
82 } else if (new_range->flags == LOGIC_PIO_INDIRECT) { 82 } else if (new_range->flags == LOGIC_PIO_INDIRECT) {
83 if (iio_sz + new_range->size - 1 > IO_SPACE_LIMIT) { 83 if (iio_sz + new_range->size - 1 > IO_SPACE_LIMIT) {
84 ret = -E2BIG; 84 ret = -E2BIG;
@@ -99,6 +99,20 @@ end_register:
99} 99}
100 100
101/** 101/**
102 * logic_pio_unregister_range - unregister a logical PIO range for a host
103 * @range: pointer to the IO range which has been already registered.
104 *
105 * Unregister a previously-registered IO range node.
106 */
107void logic_pio_unregister_range(struct logic_pio_hwaddr *range)
108{
109 mutex_lock(&io_range_mutex);
110 list_del_rcu(&range->list);
111 mutex_unlock(&io_range_mutex);
112 synchronize_rcu();
113}
114
115/**
102 * find_io_range_by_fwnode - find logical PIO range for given FW node 116 * find_io_range_by_fwnode - find logical PIO range for given FW node
103 * @fwnode: FW node handle associated with logical PIO range 117 * @fwnode: FW node handle associated with logical PIO range
104 * 118 *
@@ -108,26 +122,38 @@ end_register:
108 */ 122 */
109struct logic_pio_hwaddr *find_io_range_by_fwnode(struct fwnode_handle *fwnode) 123struct logic_pio_hwaddr *find_io_range_by_fwnode(struct fwnode_handle *fwnode)
110{ 124{
111 struct logic_pio_hwaddr *range; 125 struct logic_pio_hwaddr *range, *found_range = NULL;
112 126
127 rcu_read_lock();
113 list_for_each_entry_rcu(range, &io_range_list, list) { 128 list_for_each_entry_rcu(range, &io_range_list, list) {
114 if (range->fwnode == fwnode) 129 if (range->fwnode == fwnode) {
115 return range; 130 found_range = range;
131 break;
132 }
116 } 133 }
117 return NULL; 134 rcu_read_unlock();
135
136 return found_range;
118} 137}
119 138
120/* Return a registered range given an input PIO token */ 139/* Return a registered range given an input PIO token */
121static struct logic_pio_hwaddr *find_io_range(unsigned long pio) 140static struct logic_pio_hwaddr *find_io_range(unsigned long pio)
122{ 141{
123 struct logic_pio_hwaddr *range; 142 struct logic_pio_hwaddr *range, *found_range = NULL;
124 143
144 rcu_read_lock();
125 list_for_each_entry_rcu(range, &io_range_list, list) { 145 list_for_each_entry_rcu(range, &io_range_list, list) {
126 if (in_range(pio, range->io_start, range->size)) 146 if (in_range(pio, range->io_start, range->size)) {
127 return range; 147 found_range = range;
148 break;
149 }
128 } 150 }
129 pr_err("PIO entry token %lx invalid\n", pio); 151 rcu_read_unlock();
130 return NULL; 152
153 if (!found_range)
154 pr_err("PIO entry token 0x%lx invalid\n", pio);
155
156 return found_range;
131} 157}
132 158
133/** 159/**
@@ -180,14 +206,23 @@ unsigned long logic_pio_trans_cpuaddr(resource_size_t addr)
180{ 206{
181 struct logic_pio_hwaddr *range; 207 struct logic_pio_hwaddr *range;
182 208
209 rcu_read_lock();
183 list_for_each_entry_rcu(range, &io_range_list, list) { 210 list_for_each_entry_rcu(range, &io_range_list, list) {
184 if (range->flags != LOGIC_PIO_CPU_MMIO) 211 if (range->flags != LOGIC_PIO_CPU_MMIO)
185 continue; 212 continue;
186 if (in_range(addr, range->hw_start, range->size)) 213 if (in_range(addr, range->hw_start, range->size)) {
187 return addr - range->hw_start + range->io_start; 214 unsigned long cpuaddr;
215
216 cpuaddr = addr - range->hw_start + range->io_start;
217
218 rcu_read_unlock();
219 return cpuaddr;
220 }
188 } 221 }
189 pr_err("addr %llx not registered in io_range_list\n", 222 rcu_read_unlock();
190 (unsigned long long) addr); 223
224 pr_err("addr %pa not registered in io_range_list\n", &addr);
225
191 return ~0UL; 226 return ~0UL;
192} 227}
193 228
diff --git a/lib/raid6/Makefile b/lib/raid6/Makefile
index 42695bc8d451..0083b5cc646c 100644
--- a/lib/raid6/Makefile
+++ b/lib/raid6/Makefile
@@ -66,7 +66,7 @@ CFLAGS_vpermxor1.o += $(altivec_flags)
66CFLAGS_vpermxor2.o += $(altivec_flags) 66CFLAGS_vpermxor2.o += $(altivec_flags)
67CFLAGS_vpermxor4.o += $(altivec_flags) 67CFLAGS_vpermxor4.o += $(altivec_flags)
68CFLAGS_vpermxor8.o += $(altivec_flags) 68CFLAGS_vpermxor8.o += $(altivec_flags)
69targets += vpermxor1.o vpermxor2.o vpermxor4.o vpermxor8.o 69targets += vpermxor1.c vpermxor2.c vpermxor4.c vpermxor8.c
70$(obj)/vpermxor%.c: $(src)/vpermxor.uc $(src)/unroll.awk FORCE 70$(obj)/vpermxor%.c: $(src)/vpermxor.uc $(src)/unroll.awk FORCE
71 $(call if_changed,unroll) 71 $(call if_changed,unroll)
72 72
diff --git a/lib/test_firmware.c b/lib/test_firmware.c
index 83ea6c4e623c..6ca97a63b3d6 100644
--- a/lib/test_firmware.c
+++ b/lib/test_firmware.c
@@ -886,8 +886,11 @@ static int __init test_firmware_init(void)
886 return -ENOMEM; 886 return -ENOMEM;
887 887
888 rc = __test_firmware_config_init(); 888 rc = __test_firmware_config_init();
889 if (rc) 889 if (rc) {
890 kfree(test_fw_config);
891 pr_err("could not init firmware test config: %d\n", rc);
890 return rc; 892 return rc;
893 }
891 894
892 rc = misc_register(&test_fw_misc_device); 895 rc = misc_register(&test_fw_misc_device);
893 if (rc) { 896 if (rc) {
diff --git a/lib/test_meminit.c b/lib/test_meminit.c
index 62d19f270cad..9729f271d150 100644
--- a/lib/test_meminit.c
+++ b/lib/test_meminit.c
@@ -222,7 +222,7 @@ static int __init do_kmem_cache_size(size_t size, bool want_ctor,
222 * Copy the buffer to check that it's not wiped on 222 * Copy the buffer to check that it's not wiped on
223 * free(). 223 * free().
224 */ 224 */
225 buf_copy = kmalloc(size, GFP_KERNEL); 225 buf_copy = kmalloc(size, GFP_ATOMIC);
226 if (buf_copy) 226 if (buf_copy)
227 memcpy(buf_copy, buf, size); 227 memcpy(buf_copy, buf, size);
228 228
diff --git a/lib/vdso/gettimeofday.c b/lib/vdso/gettimeofday.c
index 2d1c1f241fd9..e630e7ff57f1 100644
--- a/lib/vdso/gettimeofday.c
+++ b/lib/vdso/gettimeofday.c
@@ -51,7 +51,7 @@ static int do_hres(const struct vdso_data *vd, clockid_t clk,
51 ns = vdso_ts->nsec; 51 ns = vdso_ts->nsec;
52 last = vd->cycle_last; 52 last = vd->cycle_last;
53 if (unlikely((s64)cycles < 0)) 53 if (unlikely((s64)cycles < 0))
54 return clock_gettime_fallback(clk, ts); 54 return -1;
55 55
56 ns += vdso_calc_delta(cycles, last, vd->mask, vd->mult); 56 ns += vdso_calc_delta(cycles, last, vd->mask, vd->mult);
57 ns >>= vd->shift; 57 ns >>= vd->shift;
@@ -82,14 +82,14 @@ static void do_coarse(const struct vdso_data *vd, clockid_t clk,
82} 82}
83 83
84static __maybe_unused int 84static __maybe_unused int
85__cvdso_clock_gettime(clockid_t clock, struct __kernel_timespec *ts) 85__cvdso_clock_gettime_common(clockid_t clock, struct __kernel_timespec *ts)
86{ 86{
87 const struct vdso_data *vd = __arch_get_vdso_data(); 87 const struct vdso_data *vd = __arch_get_vdso_data();
88 u32 msk; 88 u32 msk;
89 89
90 /* Check for negative values or invalid clocks */ 90 /* Check for negative values or invalid clocks */
91 if (unlikely((u32) clock >= MAX_CLOCKS)) 91 if (unlikely((u32) clock >= MAX_CLOCKS))
92 goto fallback; 92 return -1;
93 93
94 /* 94 /*
95 * Convert the clockid to a bitmask and use it to check which 95 * Convert the clockid to a bitmask and use it to check which
@@ -104,9 +104,17 @@ __cvdso_clock_gettime(clockid_t clock, struct __kernel_timespec *ts)
104 } else if (msk & VDSO_RAW) { 104 } else if (msk & VDSO_RAW) {
105 return do_hres(&vd[CS_RAW], clock, ts); 105 return do_hres(&vd[CS_RAW], clock, ts);
106 } 106 }
107 return -1;
108}
109
110static __maybe_unused int
111__cvdso_clock_gettime(clockid_t clock, struct __kernel_timespec *ts)
112{
113 int ret = __cvdso_clock_gettime_common(clock, ts);
107 114
108fallback: 115 if (unlikely(ret))
109 return clock_gettime_fallback(clock, ts); 116 return clock_gettime_fallback(clock, ts);
117 return 0;
110} 118}
111 119
112static __maybe_unused int 120static __maybe_unused int
@@ -115,20 +123,21 @@ __cvdso_clock_gettime32(clockid_t clock, struct old_timespec32 *res)
115 struct __kernel_timespec ts; 123 struct __kernel_timespec ts;
116 int ret; 124 int ret;
117 125
118 if (res == NULL) 126 ret = __cvdso_clock_gettime_common(clock, &ts);
119 goto fallback;
120 127
121 ret = __cvdso_clock_gettime(clock, &ts); 128#ifdef VDSO_HAS_32BIT_FALLBACK
129 if (unlikely(ret))
130 return clock_gettime32_fallback(clock, res);
131#else
132 if (unlikely(ret))
133 ret = clock_gettime_fallback(clock, &ts);
134#endif
122 135
123 if (ret == 0) { 136 if (likely(!ret)) {
124 res->tv_sec = ts.tv_sec; 137 res->tv_sec = ts.tv_sec;
125 res->tv_nsec = ts.tv_nsec; 138 res->tv_nsec = ts.tv_nsec;
126 } 139 }
127
128 return ret; 140 return ret;
129
130fallback:
131 return clock_gettime_fallback(clock, (struct __kernel_timespec *)res);
132} 141}
133 142
134static __maybe_unused int 143static __maybe_unused int
@@ -169,17 +178,18 @@ static __maybe_unused time_t __cvdso_time(time_t *time)
169 178
170#ifdef VDSO_HAS_CLOCK_GETRES 179#ifdef VDSO_HAS_CLOCK_GETRES
171static __maybe_unused 180static __maybe_unused
172int __cvdso_clock_getres(clockid_t clock, struct __kernel_timespec *res) 181int __cvdso_clock_getres_common(clockid_t clock, struct __kernel_timespec *res)
173{ 182{
174 const struct vdso_data *vd = __arch_get_vdso_data(); 183 const struct vdso_data *vd = __arch_get_vdso_data();
175 u64 ns; 184 u64 hrtimer_res;
176 u32 msk; 185 u32 msk;
177 u64 hrtimer_res = READ_ONCE(vd[CS_HRES_COARSE].hrtimer_res); 186 u64 ns;
178 187
179 /* Check for negative values or invalid clocks */ 188 /* Check for negative values or invalid clocks */
180 if (unlikely((u32) clock >= MAX_CLOCKS)) 189 if (unlikely((u32) clock >= MAX_CLOCKS))
181 goto fallback; 190 return -1;
182 191
192 hrtimer_res = READ_ONCE(vd[CS_HRES_COARSE].hrtimer_res);
183 /* 193 /*
184 * Convert the clockid to a bitmask and use it to check which 194 * Convert the clockid to a bitmask and use it to check which
185 * clocks are handled in the VDSO directly. 195 * clocks are handled in the VDSO directly.
@@ -201,18 +211,22 @@ int __cvdso_clock_getres(clockid_t clock, struct __kernel_timespec *res)
201 */ 211 */
202 ns = hrtimer_res; 212 ns = hrtimer_res;
203 } else { 213 } else {
204 goto fallback; 214 return -1;
205 } 215 }
206 216
207 if (res) { 217 res->tv_sec = 0;
208 res->tv_sec = 0; 218 res->tv_nsec = ns;
209 res->tv_nsec = ns;
210 }
211 219
212 return 0; 220 return 0;
221}
222
223int __cvdso_clock_getres(clockid_t clock, struct __kernel_timespec *res)
224{
225 int ret = __cvdso_clock_getres_common(clock, res);
213 226
214fallback: 227 if (unlikely(ret))
215 return clock_getres_fallback(clock, res); 228 return clock_getres_fallback(clock, res);
229 return 0;
216} 230}
217 231
218static __maybe_unused int 232static __maybe_unused int
@@ -221,19 +235,20 @@ __cvdso_clock_getres_time32(clockid_t clock, struct old_timespec32 *res)
221 struct __kernel_timespec ts; 235 struct __kernel_timespec ts;
222 int ret; 236 int ret;
223 237
224 if (res == NULL) 238 ret = __cvdso_clock_getres_common(clock, &ts);
225 goto fallback;
226 239
227 ret = __cvdso_clock_getres(clock, &ts); 240#ifdef VDSO_HAS_32BIT_FALLBACK
241 if (unlikely(ret))
242 return clock_getres32_fallback(clock, res);
243#else
244 if (unlikely(ret))
245 ret = clock_getres_fallback(clock, &ts);
246#endif
228 247
229 if (ret == 0) { 248 if (likely(!ret)) {
230 res->tv_sec = ts.tv_sec; 249 res->tv_sec = ts.tv_sec;
231 res->tv_nsec = ts.tv_nsec; 250 res->tv_nsec = ts.tv_nsec;
232 } 251 }
233
234 return ret; 252 return ret;
235
236fallback:
237 return clock_getres_fallback(clock, (struct __kernel_timespec *)res);
238} 253}
239#endif /* VDSO_HAS_CLOCK_GETRES */ 254#endif /* VDSO_HAS_CLOCK_GETRES */
diff --git a/mm/Makefile b/mm/Makefile
index 338e528ad436..d0b295c3b764 100644
--- a/mm/Makefile
+++ b/mm/Makefile
@@ -102,5 +102,6 @@ obj-$(CONFIG_FRAME_VECTOR) += frame_vector.o
102obj-$(CONFIG_DEBUG_PAGE_REF) += debug_page_ref.o 102obj-$(CONFIG_DEBUG_PAGE_REF) += debug_page_ref.o
103obj-$(CONFIG_HARDENED_USERCOPY) += usercopy.o 103obj-$(CONFIG_HARDENED_USERCOPY) += usercopy.o
104obj-$(CONFIG_PERCPU_STATS) += percpu-stats.o 104obj-$(CONFIG_PERCPU_STATS) += percpu-stats.o
105obj-$(CONFIG_ZONE_DEVICE) += memremap.o
105obj-$(CONFIG_HMM_MIRROR) += hmm.o 106obj-$(CONFIG_HMM_MIRROR) += hmm.o
106obj-$(CONFIG_MEMFD_CREATE) += memfd.o 107obj-$(CONFIG_MEMFD_CREATE) += memfd.o
diff --git a/mm/balloon_compaction.c b/mm/balloon_compaction.c
index 83a7b614061f..798275a51887 100644
--- a/mm/balloon_compaction.c
+++ b/mm/balloon_compaction.c
@@ -21,7 +21,6 @@ static void balloon_page_enqueue_one(struct balloon_dev_info *b_dev_info,
21 * memory corruption is possible and we should stop execution. 21 * memory corruption is possible and we should stop execution.
22 */ 22 */
23 BUG_ON(!trylock_page(page)); 23 BUG_ON(!trylock_page(page));
24 list_del(&page->lru);
25 balloon_page_insert(b_dev_info, page); 24 balloon_page_insert(b_dev_info, page);
26 unlock_page(page); 25 unlock_page(page);
27 __count_vm_event(BALLOON_INFLATE); 26 __count_vm_event(BALLOON_INFLATE);
@@ -33,8 +32,8 @@ static void balloon_page_enqueue_one(struct balloon_dev_info *b_dev_info,
33 * @b_dev_info: balloon device descriptor where we will insert a new page to 32 * @b_dev_info: balloon device descriptor where we will insert a new page to
34 * @pages: pages to enqueue - allocated using balloon_page_alloc. 33 * @pages: pages to enqueue - allocated using balloon_page_alloc.
35 * 34 *
36 * Driver must call it to properly enqueue a balloon pages before definitively 35 * Driver must call this function to properly enqueue balloon pages before
37 * removing it from the guest system. 36 * definitively removing them from the guest system.
38 * 37 *
39 * Return: number of pages that were enqueued. 38 * Return: number of pages that were enqueued.
40 */ 39 */
@@ -47,6 +46,7 @@ size_t balloon_page_list_enqueue(struct balloon_dev_info *b_dev_info,
47 46
48 spin_lock_irqsave(&b_dev_info->pages_lock, flags); 47 spin_lock_irqsave(&b_dev_info->pages_lock, flags);
49 list_for_each_entry_safe(page, tmp, pages, lru) { 48 list_for_each_entry_safe(page, tmp, pages, lru) {
49 list_del(&page->lru);
50 balloon_page_enqueue_one(b_dev_info, page); 50 balloon_page_enqueue_one(b_dev_info, page);
51 n_pages++; 51 n_pages++;
52 } 52 }
@@ -63,12 +63,13 @@ EXPORT_SYMBOL_GPL(balloon_page_list_enqueue);
63 * @n_req_pages: number of requested pages. 63 * @n_req_pages: number of requested pages.
64 * 64 *
65 * Driver must call this function to properly de-allocate a previous enlisted 65 * Driver must call this function to properly de-allocate a previous enlisted
66 * balloon pages before definetively releasing it back to the guest system. 66 * balloon pages before definitively releasing it back to the guest system.
67 * This function tries to remove @n_req_pages from the ballooned pages and 67 * This function tries to remove @n_req_pages from the ballooned pages and
68 * return them to the caller in the @pages list. 68 * return them to the caller in the @pages list.
69 * 69 *
70 * Note that this function may fail to dequeue some pages temporarily empty due 70 * Note that this function may fail to dequeue some pages even if the balloon
71 * to compaction isolated pages. 71 * isn't empty - since the page list can be temporarily empty due to compaction
72 * of isolated pages.
72 * 73 *
73 * Return: number of pages that were added to the @pages list. 74 * Return: number of pages that were added to the @pages list.
74 */ 75 */
@@ -112,12 +113,13 @@ EXPORT_SYMBOL_GPL(balloon_page_list_dequeue);
112 113
113/* 114/*
114 * balloon_page_alloc - allocates a new page for insertion into the balloon 115 * balloon_page_alloc - allocates a new page for insertion into the balloon
115 * page list. 116 * page list.
117 *
118 * Driver must call this function to properly allocate a new balloon page.
119 * Driver must call balloon_page_enqueue before definitively removing the page
120 * from the guest system.
116 * 121 *
117 * Driver must call it to properly allocate a new enlisted balloon page. 122 * Return: struct page for the allocated page or NULL on allocation failure.
118 * Driver must call balloon_page_enqueue before definitively removing it from
119 * the guest system. This function returns the page address for the recently
120 * allocated page or NULL in the case we fail to allocate a new page this turn.
121 */ 123 */
122struct page *balloon_page_alloc(void) 124struct page *balloon_page_alloc(void)
123{ 125{
@@ -128,15 +130,17 @@ struct page *balloon_page_alloc(void)
128EXPORT_SYMBOL_GPL(balloon_page_alloc); 130EXPORT_SYMBOL_GPL(balloon_page_alloc);
129 131
130/* 132/*
131 * balloon_page_enqueue - allocates a new page and inserts it into the balloon 133 * balloon_page_enqueue - inserts a new page into the balloon page list.
132 * page list. 134 *
133 * @b_dev_info: balloon device descriptor where we will insert a new page to 135 * @b_dev_info: balloon device descriptor where we will insert a new page
134 * @page: new page to enqueue - allocated using balloon_page_alloc. 136 * @page: new page to enqueue - allocated using balloon_page_alloc.
135 * 137 *
136 * Driver must call it to properly enqueue a new allocated balloon page 138 * Drivers must call this function to properly enqueue a new allocated balloon
137 * before definitively removing it from the guest system. 139 * page before definitively removing the page from the guest system.
138 * This function returns the page address for the recently enqueued page or 140 *
139 * NULL in the case we fail to allocate a new page this turn. 141 * Drivers must not call balloon_page_enqueue on pages that have been pushed to
142 * a list with balloon_page_push before removing them with balloon_page_pop. To
143 * enqueue a list of pages, use balloon_page_list_enqueue instead.
140 */ 144 */
141void balloon_page_enqueue(struct balloon_dev_info *b_dev_info, 145void balloon_page_enqueue(struct balloon_dev_info *b_dev_info,
142 struct page *page) 146 struct page *page)
@@ -151,14 +155,23 @@ EXPORT_SYMBOL_GPL(balloon_page_enqueue);
151 155
152/* 156/*
153 * balloon_page_dequeue - removes a page from balloon's page list and returns 157 * balloon_page_dequeue - removes a page from balloon's page list and returns
154 * the its address to allow the driver release the page. 158 * its address to allow the driver to release the page.
155 * @b_dev_info: balloon device decriptor where we will grab a page from. 159 * @b_dev_info: balloon device descriptor where we will grab a page from.
156 * 160 *
157 * Driver must call it to properly de-allocate a previous enlisted balloon page 161 * Driver must call this function to properly dequeue a previously enqueued page
158 * before definetively releasing it back to the guest system. 162 * before definitively releasing it back to the guest system.
159 * This function returns the page address for the recently dequeued page or 163 *
160 * NULL in the case we find balloon's page list temporarily empty due to 164 * Caller must perform its own accounting to ensure that this
161 * compaction isolated pages. 165 * function is called only if some pages are actually enqueued.
166 *
167 * Note that this function may fail to dequeue some pages even if there are
168 * some enqueued pages - since the page list can be temporarily empty due to
169 * the compaction of isolated pages.
170 *
171 * TODO: remove the caller accounting requirements, and allow caller to wait
172 * until all pages can be dequeued.
173 *
174 * Return: struct page for the dequeued page, or NULL if no page was dequeued.
162 */ 175 */
163struct page *balloon_page_dequeue(struct balloon_dev_info *b_dev_info) 176struct page *balloon_page_dequeue(struct balloon_dev_info *b_dev_info)
164{ 177{
@@ -171,9 +184,9 @@ struct page *balloon_page_dequeue(struct balloon_dev_info *b_dev_info)
171 if (n_pages != 1) { 184 if (n_pages != 1) {
172 /* 185 /*
173 * If we are unable to dequeue a balloon page because the page 186 * If we are unable to dequeue a balloon page because the page
174 * list is empty and there is no isolated pages, then something 187 * list is empty and there are no isolated pages, then something
175 * went out of track and some balloon pages are lost. 188 * went off track and some balloon pages are lost.
176 * BUG() here, otherwise the balloon driver may get stuck into 189 * BUG() here, otherwise the balloon driver may get stuck in
177 * an infinite loop while attempting to release all its pages. 190 * an infinite loop while attempting to release all its pages.
178 */ 191 */
179 spin_lock_irqsave(&b_dev_info->pages_lock, flags); 192 spin_lock_irqsave(&b_dev_info->pages_lock, flags);
@@ -224,8 +237,8 @@ int balloon_page_migrate(struct address_space *mapping,
224 237
225 /* 238 /*
226 * We can not easily support the no copy case here so ignore it as it 239 * We can not easily support the no copy case here so ignore it as it
227 * is unlikely to be use with ballon pages. See include/linux/hmm.h for 240 * is unlikely to be used with balloon pages. See include/linux/hmm.h
228 * user of the MIGRATE_SYNC_NO_COPY mode. 241 * for a user of the MIGRATE_SYNC_NO_COPY mode.
229 */ 242 */
230 if (mode == MIGRATE_SYNC_NO_COPY) 243 if (mode == MIGRATE_SYNC_NO_COPY)
231 return -EINVAL; 244 return -EINVAL;
diff --git a/mm/compaction.c b/mm/compaction.c
index 9e1b9acb116b..952dc2fb24e5 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -842,13 +842,15 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn,
842 842
843 /* 843 /*
844 * Periodically drop the lock (if held) regardless of its 844 * Periodically drop the lock (if held) regardless of its
845 * contention, to give chance to IRQs. Abort async compaction 845 * contention, to give chance to IRQs. Abort completely if
846 * if contended. 846 * a fatal signal is pending.
847 */ 847 */
848 if (!(low_pfn % SWAP_CLUSTER_MAX) 848 if (!(low_pfn % SWAP_CLUSTER_MAX)
849 && compact_unlock_should_abort(&pgdat->lru_lock, 849 && compact_unlock_should_abort(&pgdat->lru_lock,
850 flags, &locked, cc)) 850 flags, &locked, cc)) {
851 break; 851 low_pfn = 0;
852 goto fatal_pending;
853 }
852 854
853 if (!pfn_valid_within(low_pfn)) 855 if (!pfn_valid_within(low_pfn))
854 goto isolate_fail; 856 goto isolate_fail;
@@ -1060,6 +1062,7 @@ isolate_abort:
1060 trace_mm_compaction_isolate_migratepages(start_pfn, low_pfn, 1062 trace_mm_compaction_isolate_migratepages(start_pfn, low_pfn,
1061 nr_scanned, nr_isolated); 1063 nr_scanned, nr_isolated);
1062 1064
1065fatal_pending:
1063 cc->total_migrate_scanned += nr_scanned; 1066 cc->total_migrate_scanned += nr_scanned;
1064 if (nr_isolated) 1067 if (nr_isolated)
1065 count_compact_events(COMPACTISOLATED, nr_isolated); 1068 count_compact_events(COMPACTISOLATED, nr_isolated);
diff --git a/mm/hmm.c b/mm/hmm.c
index e1eedef129cf..16b6731a34db 100644
--- a/mm/hmm.c
+++ b/mm/hmm.c
@@ -946,7 +946,7 @@ EXPORT_SYMBOL(hmm_range_unregister);
946 * @range: range 946 * @range: range
947 * Return: -EINVAL if invalid argument, -ENOMEM out of memory, -EPERM invalid 947 * Return: -EINVAL if invalid argument, -ENOMEM out of memory, -EPERM invalid
948 * permission (for instance asking for write and range is read only), 948 * permission (for instance asking for write and range is read only),
949 * -EAGAIN if you need to retry, -EFAULT invalid (ie either no valid 949 * -EBUSY if you need to retry, -EFAULT invalid (ie either no valid
950 * vma or it is illegal to access that range), number of valid pages 950 * vma or it is illegal to access that range), number of valid pages
951 * in range->pfns[] (from range start address). 951 * in range->pfns[] (from range start address).
952 * 952 *
@@ -967,7 +967,7 @@ long hmm_range_snapshot(struct hmm_range *range)
967 do { 967 do {
968 /* If range is no longer valid force retry. */ 968 /* If range is no longer valid force retry. */
969 if (!range->valid) 969 if (!range->valid)
970 return -EAGAIN; 970 return -EBUSY;
971 971
972 vma = find_vma(hmm->mm, start); 972 vma = find_vma(hmm->mm, start);
973 if (vma == NULL || (vma->vm_flags & device_vma)) 973 if (vma == NULL || (vma->vm_flags & device_vma))
@@ -1062,10 +1062,8 @@ long hmm_range_fault(struct hmm_range *range, bool block)
1062 1062
1063 do { 1063 do {
1064 /* If range is no longer valid force retry. */ 1064 /* If range is no longer valid force retry. */
1065 if (!range->valid) { 1065 if (!range->valid)
1066 up_read(&hmm->mm->mmap_sem); 1066 return -EBUSY;
1067 return -EAGAIN;
1068 }
1069 1067
1070 vma = find_vma(hmm->mm, start); 1068 vma = find_vma(hmm->mm, start);
1071 if (vma == NULL || (vma->vm_flags & device_vma)) 1069 if (vma == NULL || (vma->vm_flags & device_vma))
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 1334ede667a8..de1f15969e27 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -32,6 +32,7 @@
32#include <linux/shmem_fs.h> 32#include <linux/shmem_fs.h>
33#include <linux/oom.h> 33#include <linux/oom.h>
34#include <linux/numa.h> 34#include <linux/numa.h>
35#include <linux/page_owner.h>
35 36
36#include <asm/tlb.h> 37#include <asm/tlb.h>
37#include <asm/pgalloc.h> 38#include <asm/pgalloc.h>
@@ -644,30 +645,40 @@ release:
644 * available 645 * available
645 * never: never stall for any thp allocation 646 * never: never stall for any thp allocation
646 */ 647 */
647static inline gfp_t alloc_hugepage_direct_gfpmask(struct vm_area_struct *vma) 648static inline gfp_t alloc_hugepage_direct_gfpmask(struct vm_area_struct *vma, unsigned long addr)
648{ 649{
649 const bool vma_madvised = !!(vma->vm_flags & VM_HUGEPAGE); 650 const bool vma_madvised = !!(vma->vm_flags & VM_HUGEPAGE);
651 gfp_t this_node = 0;
652
653#ifdef CONFIG_NUMA
654 struct mempolicy *pol;
655 /*
656 * __GFP_THISNODE is used only when __GFP_DIRECT_RECLAIM is not
657 * specified, to express a general desire to stay on the current
658 * node for optimistic allocation attempts. If the defrag mode
659 * and/or madvise hint requires the direct reclaim then we prefer
660 * to fallback to other node rather than node reclaim because that
661 * can lead to excessive reclaim even though there is free memory
662 * on other nodes. We expect that NUMA preferences are specified
663 * by memory policies.
664 */
665 pol = get_vma_policy(vma, addr);
666 if (pol->mode != MPOL_BIND)
667 this_node = __GFP_THISNODE;
668 mpol_cond_put(pol);
669#endif
650 670
651 /* Always do synchronous compaction */
652 if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags)) 671 if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags))
653 return GFP_TRANSHUGE | (vma_madvised ? 0 : __GFP_NORETRY); 672 return GFP_TRANSHUGE | (vma_madvised ? 0 : __GFP_NORETRY);
654
655 /* Kick kcompactd and fail quickly */
656 if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags)) 673 if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags))
657 return GFP_TRANSHUGE_LIGHT | __GFP_KSWAPD_RECLAIM; 674 return GFP_TRANSHUGE_LIGHT | __GFP_KSWAPD_RECLAIM | this_node;
658
659 /* Synchronous compaction if madvised, otherwise kick kcompactd */
660 if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags)) 675 if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags))
661 return GFP_TRANSHUGE_LIGHT | 676 return GFP_TRANSHUGE_LIGHT | (vma_madvised ? __GFP_DIRECT_RECLAIM :
662 (vma_madvised ? __GFP_DIRECT_RECLAIM : 677 __GFP_KSWAPD_RECLAIM | this_node);
663 __GFP_KSWAPD_RECLAIM);
664
665 /* Only do synchronous compaction if madvised */
666 if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags)) 678 if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags))
667 return GFP_TRANSHUGE_LIGHT | 679 return GFP_TRANSHUGE_LIGHT | (vma_madvised ? __GFP_DIRECT_RECLAIM :
668 (vma_madvised ? __GFP_DIRECT_RECLAIM : 0); 680 this_node);
669 681 return GFP_TRANSHUGE_LIGHT | this_node;
670 return GFP_TRANSHUGE_LIGHT;
671} 682}
672 683
673/* Caller must hold page table lock. */ 684/* Caller must hold page table lock. */
@@ -739,8 +750,8 @@ vm_fault_t do_huge_pmd_anonymous_page(struct vm_fault *vmf)
739 pte_free(vma->vm_mm, pgtable); 750 pte_free(vma->vm_mm, pgtable);
740 return ret; 751 return ret;
741 } 752 }
742 gfp = alloc_hugepage_direct_gfpmask(vma); 753 gfp = alloc_hugepage_direct_gfpmask(vma, haddr);
743 page = alloc_hugepage_vma(gfp, vma, haddr, HPAGE_PMD_ORDER); 754 page = alloc_pages_vma(gfp, HPAGE_PMD_ORDER, vma, haddr, numa_node_id());
744 if (unlikely(!page)) { 755 if (unlikely(!page)) {
745 count_vm_event(THP_FAULT_FALLBACK); 756 count_vm_event(THP_FAULT_FALLBACK);
746 return VM_FAULT_FALLBACK; 757 return VM_FAULT_FALLBACK;
@@ -1347,8 +1358,9 @@ vm_fault_t do_huge_pmd_wp_page(struct vm_fault *vmf, pmd_t orig_pmd)
1347alloc: 1358alloc:
1348 if (__transparent_hugepage_enabled(vma) && 1359 if (__transparent_hugepage_enabled(vma) &&
1349 !transparent_hugepage_debug_cow()) { 1360 !transparent_hugepage_debug_cow()) {
1350 huge_gfp = alloc_hugepage_direct_gfpmask(vma); 1361 huge_gfp = alloc_hugepage_direct_gfpmask(vma, haddr);
1351 new_page = alloc_hugepage_vma(huge_gfp, vma, haddr, HPAGE_PMD_ORDER); 1362 new_page = alloc_pages_vma(huge_gfp, HPAGE_PMD_ORDER, vma,
1363 haddr, numa_node_id());
1352 } else 1364 } else
1353 new_page = NULL; 1365 new_page = NULL;
1354 1366
@@ -2505,6 +2517,9 @@ static void __split_huge_page(struct page *page, struct list_head *list,
2505 } 2517 }
2506 2518
2507 ClearPageCompound(head); 2519 ClearPageCompound(head);
2520
2521 split_page_owner(head, HPAGE_PMD_ORDER);
2522
2508 /* See comment in __split_huge_page_tail() */ 2523 /* See comment in __split_huge_page_tail() */
2509 if (PageAnon(head)) { 2524 if (PageAnon(head)) {
2510 /* Additional pin to swap cache */ 2525 /* Additional pin to swap cache */
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index ede7e7f5d1ab..6d7296dd11b8 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -3856,6 +3856,25 @@ retry:
3856 3856
3857 page = alloc_huge_page(vma, haddr, 0); 3857 page = alloc_huge_page(vma, haddr, 0);
3858 if (IS_ERR(page)) { 3858 if (IS_ERR(page)) {
3859 /*
3860 * Returning error will result in faulting task being
3861 * sent SIGBUS. The hugetlb fault mutex prevents two
3862 * tasks from racing to fault in the same page which
3863 * could result in false unable to allocate errors.
3864 * Page migration does not take the fault mutex, but
3865 * does a clear then write of pte's under page table
3866 * lock. Page fault code could race with migration,
3867 * notice the clear pte and try to allocate a page
3868 * here. Before returning error, get ptl and make
3869 * sure there really is no pte entry.
3870 */
3871 ptl = huge_pte_lock(h, mm, ptep);
3872 if (!huge_pte_none(huge_ptep_get(ptep))) {
3873 ret = 0;
3874 spin_unlock(ptl);
3875 goto out;
3876 }
3877 spin_unlock(ptl);
3859 ret = vmf_error(PTR_ERR(page)); 3878 ret = vmf_error(PTR_ERR(page));
3860 goto out; 3879 goto out;
3861 } 3880 }
diff --git a/mm/kasan/common.c b/mm/kasan/common.c
index 2277b82902d8..95d16a42db6b 100644
--- a/mm/kasan/common.c
+++ b/mm/kasan/common.c
@@ -407,8 +407,14 @@ static inline bool shadow_invalid(u8 tag, s8 shadow_byte)
407 if (IS_ENABLED(CONFIG_KASAN_GENERIC)) 407 if (IS_ENABLED(CONFIG_KASAN_GENERIC))
408 return shadow_byte < 0 || 408 return shadow_byte < 0 ||
409 shadow_byte >= KASAN_SHADOW_SCALE_SIZE; 409 shadow_byte >= KASAN_SHADOW_SCALE_SIZE;
410 else 410
411 return tag != (u8)shadow_byte; 411 /* else CONFIG_KASAN_SW_TAGS: */
412 if ((u8)shadow_byte == KASAN_TAG_INVALID)
413 return true;
414 if ((tag != KASAN_TAG_KERNEL) && (tag != (u8)shadow_byte))
415 return true;
416
417 return false;
412} 418}
413 419
414static bool __kasan_slab_free(struct kmem_cache *cache, void *object, 420static bool __kasan_slab_free(struct kmem_cache *cache, void *object,
diff --git a/mm/kmemleak.c b/mm/kmemleak.c
index dbbd518fb6b3..f6e602918dac 100644
--- a/mm/kmemleak.c
+++ b/mm/kmemleak.c
@@ -114,7 +114,7 @@
114/* GFP bitmask for kmemleak internal allocations */ 114/* GFP bitmask for kmemleak internal allocations */
115#define gfp_kmemleak_mask(gfp) (((gfp) & (GFP_KERNEL | GFP_ATOMIC)) | \ 115#define gfp_kmemleak_mask(gfp) (((gfp) & (GFP_KERNEL | GFP_ATOMIC)) | \
116 __GFP_NORETRY | __GFP_NOMEMALLOC | \ 116 __GFP_NORETRY | __GFP_NOMEMALLOC | \
117 __GFP_NOWARN | __GFP_NOFAIL) 117 __GFP_NOWARN)
118 118
119/* scanning area inside a memory block */ 119/* scanning area inside a memory block */
120struct kmemleak_scan_area { 120struct kmemleak_scan_area {
@@ -1966,6 +1966,7 @@ static void kmemleak_disable(void)
1966 1966
1967 /* stop any memory operation tracing */ 1967 /* stop any memory operation tracing */
1968 kmemleak_enabled = 0; 1968 kmemleak_enabled = 0;
1969 kmemleak_early_log = 0;
1969 1970
1970 /* check whether it is too early for a kernel thread */ 1971 /* check whether it is too early for a kernel thread */
1971 if (kmemleak_initialized) 1972 if (kmemleak_initialized)
@@ -2009,7 +2010,6 @@ void __init kmemleak_init(void)
2009 2010
2010#ifdef CONFIG_DEBUG_KMEMLEAK_DEFAULT_OFF 2011#ifdef CONFIG_DEBUG_KMEMLEAK_DEFAULT_OFF
2011 if (!kmemleak_skip_disable) { 2012 if (!kmemleak_skip_disable) {
2012 kmemleak_early_log = 0;
2013 kmemleak_disable(); 2013 kmemleak_disable();
2014 return; 2014 return;
2015 } 2015 }
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index cdbb7a84cb6e..9ec5e12486a7 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -752,15 +752,13 @@ void __mod_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx,
752 /* Update memcg */ 752 /* Update memcg */
753 __mod_memcg_state(memcg, idx, val); 753 __mod_memcg_state(memcg, idx, val);
754 754
755 /* Update lruvec */
756 __this_cpu_add(pn->lruvec_stat_local->count[idx], val);
757
755 x = val + __this_cpu_read(pn->lruvec_stat_cpu->count[idx]); 758 x = val + __this_cpu_read(pn->lruvec_stat_cpu->count[idx]);
756 if (unlikely(abs(x) > MEMCG_CHARGE_BATCH)) { 759 if (unlikely(abs(x) > MEMCG_CHARGE_BATCH)) {
757 struct mem_cgroup_per_node *pi; 760 struct mem_cgroup_per_node *pi;
758 761
759 /*
760 * Batch local counters to keep them in sync with
761 * the hierarchical ones.
762 */
763 __this_cpu_add(pn->lruvec_stat_local->count[idx], x);
764 for (pi = pn; pi; pi = parent_nodeinfo(pi, pgdat->node_id)) 762 for (pi = pn; pi; pi = parent_nodeinfo(pi, pgdat->node_id))
765 atomic_long_add(x, &pi->lruvec_stat[idx]); 763 atomic_long_add(x, &pi->lruvec_stat[idx]);
766 x = 0; 764 x = 0;
@@ -768,6 +766,26 @@ void __mod_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx,
768 __this_cpu_write(pn->lruvec_stat_cpu->count[idx], x); 766 __this_cpu_write(pn->lruvec_stat_cpu->count[idx], x);
769} 767}
770 768
769void __mod_lruvec_slab_state(void *p, enum node_stat_item idx, int val)
770{
771 struct page *page = virt_to_head_page(p);
772 pg_data_t *pgdat = page_pgdat(page);
773 struct mem_cgroup *memcg;
774 struct lruvec *lruvec;
775
776 rcu_read_lock();
777 memcg = memcg_from_slab_page(page);
778
779 /* Untracked pages have no memcg, no lruvec. Update only the node */
780 if (!memcg || memcg == root_mem_cgroup) {
781 __mod_node_page_state(pgdat, idx, val);
782 } else {
783 lruvec = mem_cgroup_lruvec(pgdat, memcg);
784 __mod_lruvec_state(lruvec, idx, val);
785 }
786 rcu_read_unlock();
787}
788
771/** 789/**
772 * __count_memcg_events - account VM events in a cgroup 790 * __count_memcg_events - account VM events in a cgroup
773 * @memcg: the memory cgroup 791 * @memcg: the memory cgroup
@@ -1130,26 +1148,45 @@ void mem_cgroup_iter_break(struct mem_cgroup *root,
1130 css_put(&prev->css); 1148 css_put(&prev->css);
1131} 1149}
1132 1150
1133static void invalidate_reclaim_iterators(struct mem_cgroup *dead_memcg) 1151static void __invalidate_reclaim_iterators(struct mem_cgroup *from,
1152 struct mem_cgroup *dead_memcg)
1134{ 1153{
1135 struct mem_cgroup *memcg = dead_memcg;
1136 struct mem_cgroup_reclaim_iter *iter; 1154 struct mem_cgroup_reclaim_iter *iter;
1137 struct mem_cgroup_per_node *mz; 1155 struct mem_cgroup_per_node *mz;
1138 int nid; 1156 int nid;
1139 int i; 1157 int i;
1140 1158
1141 for (; memcg; memcg = parent_mem_cgroup(memcg)) { 1159 for_each_node(nid) {
1142 for_each_node(nid) { 1160 mz = mem_cgroup_nodeinfo(from, nid);
1143 mz = mem_cgroup_nodeinfo(memcg, nid); 1161 for (i = 0; i <= DEF_PRIORITY; i++) {
1144 for (i = 0; i <= DEF_PRIORITY; i++) { 1162 iter = &mz->iter[i];
1145 iter = &mz->iter[i]; 1163 cmpxchg(&iter->position,
1146 cmpxchg(&iter->position, 1164 dead_memcg, NULL);
1147 dead_memcg, NULL);
1148 }
1149 } 1165 }
1150 } 1166 }
1151} 1167}
1152 1168
1169static void invalidate_reclaim_iterators(struct mem_cgroup *dead_memcg)
1170{
1171 struct mem_cgroup *memcg = dead_memcg;
1172 struct mem_cgroup *last;
1173
1174 do {
1175 __invalidate_reclaim_iterators(memcg, dead_memcg);
1176 last = memcg;
1177 } while ((memcg = parent_mem_cgroup(memcg)));
1178
1179 /*
1180 * When cgroup1 non-hierarchy mode is used,
1181 * parent_mem_cgroup() does not walk all the way up to the
1182 * cgroup root (root_mem_cgroup). So we have to handle
1183 * dead_memcg from cgroup root separately.
1184 */
1185 if (last != root_mem_cgroup)
1186 __invalidate_reclaim_iterators(root_mem_cgroup,
1187 dead_memcg);
1188}
1189
1153/** 1190/**
1154 * mem_cgroup_scan_tasks - iterate over tasks of a memory cgroup hierarchy 1191 * mem_cgroup_scan_tasks - iterate over tasks of a memory cgroup hierarchy
1155 * @memcg: hierarchy root 1192 * @memcg: hierarchy root
@@ -3221,6 +3258,72 @@ static u64 mem_cgroup_read_u64(struct cgroup_subsys_state *css,
3221 } 3258 }
3222} 3259}
3223 3260
3261static void memcg_flush_percpu_vmstats(struct mem_cgroup *memcg, bool slab_only)
3262{
3263 unsigned long stat[MEMCG_NR_STAT];
3264 struct mem_cgroup *mi;
3265 int node, cpu, i;
3266 int min_idx, max_idx;
3267
3268 if (slab_only) {
3269 min_idx = NR_SLAB_RECLAIMABLE;
3270 max_idx = NR_SLAB_UNRECLAIMABLE;
3271 } else {
3272 min_idx = 0;
3273 max_idx = MEMCG_NR_STAT;
3274 }
3275
3276 for (i = min_idx; i < max_idx; i++)
3277 stat[i] = 0;
3278
3279 for_each_online_cpu(cpu)
3280 for (i = min_idx; i < max_idx; i++)
3281 stat[i] += per_cpu(memcg->vmstats_percpu->stat[i], cpu);
3282
3283 for (mi = memcg; mi; mi = parent_mem_cgroup(mi))
3284 for (i = min_idx; i < max_idx; i++)
3285 atomic_long_add(stat[i], &mi->vmstats[i]);
3286
3287 if (!slab_only)
3288 max_idx = NR_VM_NODE_STAT_ITEMS;
3289
3290 for_each_node(node) {
3291 struct mem_cgroup_per_node *pn = memcg->nodeinfo[node];
3292 struct mem_cgroup_per_node *pi;
3293
3294 for (i = min_idx; i < max_idx; i++)
3295 stat[i] = 0;
3296
3297 for_each_online_cpu(cpu)
3298 for (i = min_idx; i < max_idx; i++)
3299 stat[i] += per_cpu(
3300 pn->lruvec_stat_cpu->count[i], cpu);
3301
3302 for (pi = pn; pi; pi = parent_nodeinfo(pi, node))
3303 for (i = min_idx; i < max_idx; i++)
3304 atomic_long_add(stat[i], &pi->lruvec_stat[i]);
3305 }
3306}
3307
3308static void memcg_flush_percpu_vmevents(struct mem_cgroup *memcg)
3309{
3310 unsigned long events[NR_VM_EVENT_ITEMS];
3311 struct mem_cgroup *mi;
3312 int cpu, i;
3313
3314 for (i = 0; i < NR_VM_EVENT_ITEMS; i++)
3315 events[i] = 0;
3316
3317 for_each_online_cpu(cpu)
3318 for (i = 0; i < NR_VM_EVENT_ITEMS; i++)
3319 events[i] += per_cpu(memcg->vmstats_percpu->events[i],
3320 cpu);
3321
3322 for (mi = memcg; mi; mi = parent_mem_cgroup(mi))
3323 for (i = 0; i < NR_VM_EVENT_ITEMS; i++)
3324 atomic_long_add(events[i], &mi->vmevents[i]);
3325}
3326
3224#ifdef CONFIG_MEMCG_KMEM 3327#ifdef CONFIG_MEMCG_KMEM
3225static int memcg_online_kmem(struct mem_cgroup *memcg) 3328static int memcg_online_kmem(struct mem_cgroup *memcg)
3226{ 3329{
@@ -3270,7 +3373,14 @@ static void memcg_offline_kmem(struct mem_cgroup *memcg)
3270 if (!parent) 3373 if (!parent)
3271 parent = root_mem_cgroup; 3374 parent = root_mem_cgroup;
3272 3375
3376 /*
3377 * Deactivate and reparent kmem_caches. Then flush percpu
3378 * slab statistics to have precise values at the parent and
3379 * all ancestor levels. It's required to keep slab stats
3380 * accurate after the reparenting of kmem_caches.
3381 */
3273 memcg_deactivate_kmem_caches(memcg, parent); 3382 memcg_deactivate_kmem_caches(memcg, parent);
3383 memcg_flush_percpu_vmstats(memcg, true);
3274 3384
3275 kmemcg_id = memcg->kmemcg_id; 3385 kmemcg_id = memcg->kmemcg_id;
3276 BUG_ON(kmemcg_id < 0); 3386 BUG_ON(kmemcg_id < 0);
@@ -4643,6 +4753,12 @@ static void __mem_cgroup_free(struct mem_cgroup *memcg)
4643{ 4753{
4644 int node; 4754 int node;
4645 4755
4756 /*
4757 * Flush percpu vmstats and vmevents to guarantee the value correctness
4758 * on parent's and all ancestor levels.
4759 */
4760 memcg_flush_percpu_vmstats(memcg, false);
4761 memcg_flush_percpu_vmevents(memcg);
4646 for_each_node(node) 4762 for_each_node(node)
4647 free_mem_cgroup_per_node_info(memcg, node); 4763 free_mem_cgroup_per_node_info(memcg, node);
4648 free_percpu(memcg->vmstats_percpu); 4764 free_percpu(memcg->vmstats_percpu);
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index 2a9bbddb0e55..c73f09913165 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -132,7 +132,6 @@ static void release_memory_resource(struct resource *res)
132 return; 132 return;
133 release_resource(res); 133 release_resource(res);
134 kfree(res); 134 kfree(res);
135 return;
136} 135}
137 136
138#ifdef CONFIG_MEMORY_HOTPLUG_SPARSE 137#ifdef CONFIG_MEMORY_HOTPLUG_SPARSE
@@ -979,7 +978,6 @@ static void rollback_node_hotadd(int nid)
979 arch_refresh_nodedata(nid, NULL); 978 arch_refresh_nodedata(nid, NULL);
980 free_percpu(pgdat->per_cpu_nodestats); 979 free_percpu(pgdat->per_cpu_nodestats);
981 arch_free_nodedata(pgdat); 980 arch_free_nodedata(pgdat);
982 return;
983} 981}
984 982
985 983
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index f48693f75b37..65e0874fce17 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -403,7 +403,7 @@ static const struct mempolicy_operations mpol_ops[MPOL_MAX] = {
403 }, 403 },
404}; 404};
405 405
406static void migrate_page_add(struct page *page, struct list_head *pagelist, 406static int migrate_page_add(struct page *page, struct list_head *pagelist,
407 unsigned long flags); 407 unsigned long flags);
408 408
409struct queue_pages { 409struct queue_pages {
@@ -429,11 +429,14 @@ static inline bool queue_pages_required(struct page *page,
429} 429}
430 430
431/* 431/*
432 * queue_pages_pmd() has three possible return values: 432 * queue_pages_pmd() has four possible return values:
433 * 1 - pages are placed on the right node or queued successfully. 433 * 0 - pages are placed on the right node or queued successfully.
434 * 0 - THP was split. 434 * 1 - there is unmovable page, and MPOL_MF_MOVE* & MPOL_MF_STRICT were
435 * -EIO - is migration entry or MPOL_MF_STRICT was specified and an existing 435 * specified.
436 * page was already on a node that does not follow the policy. 436 * 2 - THP was split.
437 * -EIO - is migration entry or only MPOL_MF_STRICT was specified and an
438 * existing page was already on a node that does not follow the
439 * policy.
437 */ 440 */
438static int queue_pages_pmd(pmd_t *pmd, spinlock_t *ptl, unsigned long addr, 441static int queue_pages_pmd(pmd_t *pmd, spinlock_t *ptl, unsigned long addr,
439 unsigned long end, struct mm_walk *walk) 442 unsigned long end, struct mm_walk *walk)
@@ -451,23 +454,20 @@ static int queue_pages_pmd(pmd_t *pmd, spinlock_t *ptl, unsigned long addr,
451 if (is_huge_zero_page(page)) { 454 if (is_huge_zero_page(page)) {
452 spin_unlock(ptl); 455 spin_unlock(ptl);
453 __split_huge_pmd(walk->vma, pmd, addr, false, NULL); 456 __split_huge_pmd(walk->vma, pmd, addr, false, NULL);
457 ret = 2;
454 goto out; 458 goto out;
455 } 459 }
456 if (!queue_pages_required(page, qp)) { 460 if (!queue_pages_required(page, qp))
457 ret = 1;
458 goto unlock; 461 goto unlock;
459 }
460 462
461 ret = 1;
462 flags = qp->flags; 463 flags = qp->flags;
463 /* go to thp migration */ 464 /* go to thp migration */
464 if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) { 465 if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) {
465 if (!vma_migratable(walk->vma)) { 466 if (!vma_migratable(walk->vma) ||
466 ret = -EIO; 467 migrate_page_add(page, qp->pagelist, flags)) {
468 ret = 1;
467 goto unlock; 469 goto unlock;
468 } 470 }
469
470 migrate_page_add(page, qp->pagelist, flags);
471 } else 471 } else
472 ret = -EIO; 472 ret = -EIO;
473unlock: 473unlock:
@@ -479,6 +479,13 @@ out:
479/* 479/*
480 * Scan through pages checking if pages follow certain conditions, 480 * Scan through pages checking if pages follow certain conditions,
481 * and move them to the pagelist if they do. 481 * and move them to the pagelist if they do.
482 *
483 * queue_pages_pte_range() has three possible return values:
484 * 0 - pages are placed on the right node or queued successfully.
485 * 1 - there is unmovable page, and MPOL_MF_MOVE* & MPOL_MF_STRICT were
486 * specified.
487 * -EIO - only MPOL_MF_STRICT was specified and an existing page was already
488 * on a node that does not follow the policy.
482 */ 489 */
483static int queue_pages_pte_range(pmd_t *pmd, unsigned long addr, 490static int queue_pages_pte_range(pmd_t *pmd, unsigned long addr,
484 unsigned long end, struct mm_walk *walk) 491 unsigned long end, struct mm_walk *walk)
@@ -488,17 +495,17 @@ static int queue_pages_pte_range(pmd_t *pmd, unsigned long addr,
488 struct queue_pages *qp = walk->private; 495 struct queue_pages *qp = walk->private;
489 unsigned long flags = qp->flags; 496 unsigned long flags = qp->flags;
490 int ret; 497 int ret;
498 bool has_unmovable = false;
491 pte_t *pte; 499 pte_t *pte;
492 spinlock_t *ptl; 500 spinlock_t *ptl;
493 501
494 ptl = pmd_trans_huge_lock(pmd, vma); 502 ptl = pmd_trans_huge_lock(pmd, vma);
495 if (ptl) { 503 if (ptl) {
496 ret = queue_pages_pmd(pmd, ptl, addr, end, walk); 504 ret = queue_pages_pmd(pmd, ptl, addr, end, walk);
497 if (ret > 0) 505 if (ret != 2)
498 return 0;
499 else if (ret < 0)
500 return ret; 506 return ret;
501 } 507 }
508 /* THP was split, fall through to pte walk */
502 509
503 if (pmd_trans_unstable(pmd)) 510 if (pmd_trans_unstable(pmd))
504 return 0; 511 return 0;
@@ -519,14 +526,28 @@ static int queue_pages_pte_range(pmd_t *pmd, unsigned long addr,
519 if (!queue_pages_required(page, qp)) 526 if (!queue_pages_required(page, qp))
520 continue; 527 continue;
521 if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) { 528 if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) {
522 if (!vma_migratable(vma)) 529 /* MPOL_MF_STRICT must be specified if we get here */
530 if (!vma_migratable(vma)) {
531 has_unmovable = true;
523 break; 532 break;
524 migrate_page_add(page, qp->pagelist, flags); 533 }
534
535 /*
536 * Do not abort immediately since there may be
537 * temporary off LRU pages in the range. Still
538 * need migrate other LRU pages.
539 */
540 if (migrate_page_add(page, qp->pagelist, flags))
541 has_unmovable = true;
525 } else 542 } else
526 break; 543 break;
527 } 544 }
528 pte_unmap_unlock(pte - 1, ptl); 545 pte_unmap_unlock(pte - 1, ptl);
529 cond_resched(); 546 cond_resched();
547
548 if (has_unmovable)
549 return 1;
550
530 return addr != end ? -EIO : 0; 551 return addr != end ? -EIO : 0;
531} 552}
532 553
@@ -639,7 +660,13 @@ static int queue_pages_test_walk(unsigned long start, unsigned long end,
639 * 660 *
640 * If pages found in a given range are on a set of nodes (determined by 661 * If pages found in a given range are on a set of nodes (determined by
641 * @nodes and @flags,) it's isolated and queued to the pagelist which is 662 * @nodes and @flags,) it's isolated and queued to the pagelist which is
642 * passed via @private.) 663 * passed via @private.
664 *
665 * queue_pages_range() has three possible return values:
666 * 1 - there is unmovable page, but MPOL_MF_MOVE* & MPOL_MF_STRICT were
667 * specified.
668 * 0 - queue pages successfully or no misplaced page.
669 * -EIO - there is misplaced page and only MPOL_MF_STRICT was specified.
643 */ 670 */
644static int 671static int
645queue_pages_range(struct mm_struct *mm, unsigned long start, unsigned long end, 672queue_pages_range(struct mm_struct *mm, unsigned long start, unsigned long end,
@@ -940,7 +967,7 @@ static long do_get_mempolicy(int *policy, nodemask_t *nmask,
940/* 967/*
941 * page migration, thp tail pages can be passed. 968 * page migration, thp tail pages can be passed.
942 */ 969 */
943static void migrate_page_add(struct page *page, struct list_head *pagelist, 970static int migrate_page_add(struct page *page, struct list_head *pagelist,
944 unsigned long flags) 971 unsigned long flags)
945{ 972{
946 struct page *head = compound_head(page); 973 struct page *head = compound_head(page);
@@ -953,8 +980,19 @@ static void migrate_page_add(struct page *page, struct list_head *pagelist,
953 mod_node_page_state(page_pgdat(head), 980 mod_node_page_state(page_pgdat(head),
954 NR_ISOLATED_ANON + page_is_file_cache(head), 981 NR_ISOLATED_ANON + page_is_file_cache(head),
955 hpage_nr_pages(head)); 982 hpage_nr_pages(head));
983 } else if (flags & MPOL_MF_STRICT) {
984 /*
985 * Non-movable page may reach here. And, there may be
986 * temporary off LRU pages or non-LRU movable pages.
987 * Treat them as unmovable pages since they can't be
988 * isolated, so they can't be moved at the moment. It
989 * should return -EIO for this case too.
990 */
991 return -EIO;
956 } 992 }
957 } 993 }
994
995 return 0;
958} 996}
959 997
960/* page allocation callback for NUMA node migration */ 998/* page allocation callback for NUMA node migration */
@@ -1142,8 +1180,8 @@ static struct page *new_page(struct page *page, unsigned long start)
1142 } else if (PageTransHuge(page)) { 1180 } else if (PageTransHuge(page)) {
1143 struct page *thp; 1181 struct page *thp;
1144 1182
1145 thp = alloc_hugepage_vma(GFP_TRANSHUGE, vma, address, 1183 thp = alloc_pages_vma(GFP_TRANSHUGE, HPAGE_PMD_ORDER, vma,
1146 HPAGE_PMD_ORDER); 1184 address, numa_node_id());
1147 if (!thp) 1185 if (!thp)
1148 return NULL; 1186 return NULL;
1149 prep_transhuge_page(thp); 1187 prep_transhuge_page(thp);
@@ -1157,9 +1195,10 @@ static struct page *new_page(struct page *page, unsigned long start)
1157} 1195}
1158#else 1196#else
1159 1197
1160static void migrate_page_add(struct page *page, struct list_head *pagelist, 1198static int migrate_page_add(struct page *page, struct list_head *pagelist,
1161 unsigned long flags) 1199 unsigned long flags)
1162{ 1200{
1201 return -EIO;
1163} 1202}
1164 1203
1165int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from, 1204int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from,
@@ -1182,6 +1221,7 @@ static long do_mbind(unsigned long start, unsigned long len,
1182 struct mempolicy *new; 1221 struct mempolicy *new;
1183 unsigned long end; 1222 unsigned long end;
1184 int err; 1223 int err;
1224 int ret;
1185 LIST_HEAD(pagelist); 1225 LIST_HEAD(pagelist);
1186 1226
1187 if (flags & ~(unsigned long)MPOL_MF_VALID) 1227 if (flags & ~(unsigned long)MPOL_MF_VALID)
@@ -1243,10 +1283,15 @@ static long do_mbind(unsigned long start, unsigned long len,
1243 if (err) 1283 if (err)
1244 goto mpol_out; 1284 goto mpol_out;
1245 1285
1246 err = queue_pages_range(mm, start, end, nmask, 1286 ret = queue_pages_range(mm, start, end, nmask,
1247 flags | MPOL_MF_INVERT, &pagelist); 1287 flags | MPOL_MF_INVERT, &pagelist);
1248 if (!err) 1288
1249 err = mbind_range(mm, start, end, new); 1289 if (ret < 0) {
1290 err = -EIO;
1291 goto up_out;
1292 }
1293
1294 err = mbind_range(mm, start, end, new);
1250 1295
1251 if (!err) { 1296 if (!err) {
1252 int nr_failed = 0; 1297 int nr_failed = 0;
@@ -1259,13 +1304,14 @@ static long do_mbind(unsigned long start, unsigned long len,
1259 putback_movable_pages(&pagelist); 1304 putback_movable_pages(&pagelist);
1260 } 1305 }
1261 1306
1262 if (nr_failed && (flags & MPOL_MF_STRICT)) 1307 if ((ret > 0) || (nr_failed && (flags & MPOL_MF_STRICT)))
1263 err = -EIO; 1308 err = -EIO;
1264 } else 1309 } else
1265 putback_movable_pages(&pagelist); 1310 putback_movable_pages(&pagelist);
1266 1311
1312up_out:
1267 up_write(&mm->mmap_sem); 1313 up_write(&mm->mmap_sem);
1268 mpol_out: 1314mpol_out:
1269 mpol_put(new); 1315 mpol_put(new);
1270 return err; 1316 return err;
1271} 1317}
@@ -1688,7 +1734,7 @@ struct mempolicy *__get_vma_policy(struct vm_area_struct *vma,
1688 * freeing by another task. It is the caller's responsibility to free the 1734 * freeing by another task. It is the caller's responsibility to free the
1689 * extra reference for shared policies. 1735 * extra reference for shared policies.
1690 */ 1736 */
1691static struct mempolicy *get_vma_policy(struct vm_area_struct *vma, 1737struct mempolicy *get_vma_policy(struct vm_area_struct *vma,
1692 unsigned long addr) 1738 unsigned long addr)
1693{ 1739{
1694 struct mempolicy *pol = __get_vma_policy(vma, addr); 1740 struct mempolicy *pol = __get_vma_policy(vma, addr);
@@ -2037,7 +2083,6 @@ static struct page *alloc_page_interleave(gfp_t gfp, unsigned order,
2037 * @vma: Pointer to VMA or NULL if not available. 2083 * @vma: Pointer to VMA or NULL if not available.
2038 * @addr: Virtual Address of the allocation. Must be inside the VMA. 2084 * @addr: Virtual Address of the allocation. Must be inside the VMA.
2039 * @node: Which node to prefer for allocation (modulo policy). 2085 * @node: Which node to prefer for allocation (modulo policy).
2040 * @hugepage: for hugepages try only the preferred node if possible
2041 * 2086 *
2042 * This function allocates a page from the kernel page pool and applies 2087 * This function allocates a page from the kernel page pool and applies
2043 * a NUMA policy associated with the VMA or the current process. 2088 * a NUMA policy associated with the VMA or the current process.
@@ -2048,7 +2093,7 @@ static struct page *alloc_page_interleave(gfp_t gfp, unsigned order,
2048 */ 2093 */
2049struct page * 2094struct page *
2050alloc_pages_vma(gfp_t gfp, int order, struct vm_area_struct *vma, 2095alloc_pages_vma(gfp_t gfp, int order, struct vm_area_struct *vma,
2051 unsigned long addr, int node, bool hugepage) 2096 unsigned long addr, int node)
2052{ 2097{
2053 struct mempolicy *pol; 2098 struct mempolicy *pol;
2054 struct page *page; 2099 struct page *page;
@@ -2066,31 +2111,6 @@ alloc_pages_vma(gfp_t gfp, int order, struct vm_area_struct *vma,
2066 goto out; 2111 goto out;
2067 } 2112 }
2068 2113
2069 if (unlikely(IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) && hugepage)) {
2070 int hpage_node = node;
2071
2072 /*
2073 * For hugepage allocation and non-interleave policy which
2074 * allows the current node (or other explicitly preferred
2075 * node) we only try to allocate from the current/preferred
2076 * node and don't fall back to other nodes, as the cost of
2077 * remote accesses would likely offset THP benefits.
2078 *
2079 * If the policy is interleave, or does not allow the current
2080 * node in its nodemask, we allocate the standard way.
2081 */
2082 if (pol->mode == MPOL_PREFERRED && !(pol->flags & MPOL_F_LOCAL))
2083 hpage_node = pol->v.preferred_node;
2084
2085 nmask = policy_nodemask(gfp, pol);
2086 if (!nmask || node_isset(hpage_node, *nmask)) {
2087 mpol_cond_put(pol);
2088 page = __alloc_pages_node(hpage_node,
2089 gfp | __GFP_THISNODE, order);
2090 goto out;
2091 }
2092 }
2093
2094 nmask = policy_nodemask(gfp, pol); 2114 nmask = policy_nodemask(gfp, pol);
2095 preferred_nid = policy_node(gfp, pol, node); 2115 preferred_nid = policy_node(gfp, pol, node);
2096 page = __alloc_pages_nodemask(gfp, order, preferred_nid, nmask); 2116 page = __alloc_pages_nodemask(gfp, order, preferred_nid, nmask);
diff --git a/kernel/memremap.c b/mm/memremap.c
index 6ee03a816d67..ed70c4e8e52a 100644
--- a/kernel/memremap.c
+++ b/mm/memremap.c
@@ -91,6 +91,12 @@ static void dev_pagemap_cleanup(struct dev_pagemap *pgmap)
91 wait_for_completion(&pgmap->done); 91 wait_for_completion(&pgmap->done);
92 percpu_ref_exit(pgmap->ref); 92 percpu_ref_exit(pgmap->ref);
93 } 93 }
94 /*
95 * Undo the pgmap ref assignment for the internal case as the
96 * caller may re-enable the same pgmap.
97 */
98 if (pgmap->ref == &pgmap->internal_ref)
99 pgmap->ref = NULL;
94} 100}
95 101
96static void devm_memremap_pages_release(void *data) 102static void devm_memremap_pages_release(void *data)
@@ -397,6 +403,30 @@ void __put_devmap_managed_page(struct page *page)
397 403
398 mem_cgroup_uncharge(page); 404 mem_cgroup_uncharge(page);
399 405
406 /*
407 * When a device_private page is freed, the page->mapping field
408 * may still contain a (stale) mapping value. For example, the
409 * lower bits of page->mapping may still identify the page as
410 * an anonymous page. Ultimately, this entire field is just
411 * stale and wrong, and it will cause errors if not cleared.
412 * One example is:
413 *
414 * migrate_vma_pages()
415 * migrate_vma_insert_page()
416 * page_add_new_anon_rmap()
417 * __page_set_anon_rmap()
418 * ...checks page->mapping, via PageAnon(page) call,
419 * and incorrectly concludes that the page is an
420 * anonymous page. Therefore, it incorrectly,
421 * silently fails to set up the new anon rmap.
422 *
423 * For other types of ZONE_DEVICE pages, migration is either
424 * handled differently or not done at all, so there is no need
425 * to clear page->mapping.
426 */
427 if (is_device_private_page(page))
428 page->mapping = NULL;
429
400 page->pgmap->ops->page_free(page); 430 page->pgmap->ops->page_free(page);
401 } else if (!count) 431 } else if (!count)
402 __put_page(page); 432 __put_page(page);
diff --git a/mm/migrate.c b/mm/migrate.c
index 8992741f10aa..a42858d8e00b 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -767,12 +767,12 @@ recheck_buffers:
767 } 767 }
768 bh = bh->b_this_page; 768 bh = bh->b_this_page;
769 } while (bh != head); 769 } while (bh != head);
770 spin_unlock(&mapping->private_lock);
771 if (busy) { 770 if (busy) {
772 if (invalidated) { 771 if (invalidated) {
773 rc = -EAGAIN; 772 rc = -EAGAIN;
774 goto unlock_buffers; 773 goto unlock_buffers;
775 } 774 }
775 spin_unlock(&mapping->private_lock);
776 invalidate_bh_lrus(); 776 invalidate_bh_lrus();
777 invalidated = true; 777 invalidated = true;
778 goto recheck_buffers; 778 goto recheck_buffers;
@@ -805,6 +805,8 @@ recheck_buffers:
805 805
806 rc = MIGRATEPAGE_SUCCESS; 806 rc = MIGRATEPAGE_SUCCESS;
807unlock_buffers: 807unlock_buffers:
808 if (check_refs)
809 spin_unlock(&mapping->private_lock);
808 bh = head; 810 bh = head;
809 do { 811 do {
810 unlock_buffer(bh); 812 unlock_buffer(bh);
@@ -2338,16 +2340,13 @@ next:
2338static void migrate_vma_collect(struct migrate_vma *migrate) 2340static void migrate_vma_collect(struct migrate_vma *migrate)
2339{ 2341{
2340 struct mmu_notifier_range range; 2342 struct mmu_notifier_range range;
2341 struct mm_walk mm_walk; 2343 struct mm_walk mm_walk = {
2342 2344 .pmd_entry = migrate_vma_collect_pmd,
2343 mm_walk.pmd_entry = migrate_vma_collect_pmd; 2345 .pte_hole = migrate_vma_collect_hole,
2344 mm_walk.pte_entry = NULL; 2346 .vma = migrate->vma,
2345 mm_walk.pte_hole = migrate_vma_collect_hole; 2347 .mm = migrate->vma->vm_mm,
2346 mm_walk.hugetlb_entry = NULL; 2348 .private = migrate,
2347 mm_walk.test_walk = NULL; 2349 };
2348 mm_walk.vma = migrate->vma;
2349 mm_walk.mm = migrate->vma->vm_mm;
2350 mm_walk.private = migrate;
2351 2350
2352 mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, NULL, mm_walk.mm, 2351 mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, NULL, mm_walk.mm,
2353 migrate->start, 2352 migrate->start,
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 272c6de1bf4e..9c9194959271 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -2238,27 +2238,12 @@ static int move_freepages(struct zone *zone,
2238 unsigned int order; 2238 unsigned int order;
2239 int pages_moved = 0; 2239 int pages_moved = 0;
2240 2240
2241#ifndef CONFIG_HOLES_IN_ZONE
2242 /*
2243 * page_zone is not safe to call in this context when
2244 * CONFIG_HOLES_IN_ZONE is set. This bug check is probably redundant
2245 * anyway as we check zone boundaries in move_freepages_block().
2246 * Remove at a later date when no bug reports exist related to
2247 * grouping pages by mobility
2248 */
2249 VM_BUG_ON(pfn_valid(page_to_pfn(start_page)) &&
2250 pfn_valid(page_to_pfn(end_page)) &&
2251 page_zone(start_page) != page_zone(end_page));
2252#endif
2253 for (page = start_page; page <= end_page;) { 2241 for (page = start_page; page <= end_page;) {
2254 if (!pfn_valid_within(page_to_pfn(page))) { 2242 if (!pfn_valid_within(page_to_pfn(page))) {
2255 page++; 2243 page++;
2256 continue; 2244 continue;
2257 } 2245 }
2258 2246
2259 /* Make sure we are not inadvertently changing nodes */
2260 VM_BUG_ON_PAGE(page_to_nid(page) != zone_to_nid(zone), page);
2261
2262 if (!PageBuddy(page)) { 2247 if (!PageBuddy(page)) {
2263 /* 2248 /*
2264 * We assume that pages that could be isolated for 2249 * We assume that pages that could be isolated for
@@ -2273,6 +2258,10 @@ static int move_freepages(struct zone *zone,
2273 continue; 2258 continue;
2274 } 2259 }
2275 2260
2261 /* Make sure we are not inadvertently changing nodes */
2262 VM_BUG_ON_PAGE(page_to_nid(page) != zone_to_nid(zone), page);
2263 VM_BUG_ON_PAGE(page_zone(page) != zone, page);
2264
2276 order = page_order(page); 2265 order = page_order(page);
2277 move_to_free_area(page, &zone->free_area[order], migratetype); 2266 move_to_free_area(page, &zone->free_area[order], migratetype);
2278 page += 1 << order; 2267 page += 1 << order;
diff --git a/mm/rmap.c b/mm/rmap.c
index e5dfe2ae6b0d..003377e24232 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -1475,7 +1475,15 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
1475 /* 1475 /*
1476 * No need to invalidate here it will synchronize on 1476 * No need to invalidate here it will synchronize on
1477 * against the special swap migration pte. 1477 * against the special swap migration pte.
1478 *
1479 * The assignment to subpage above was computed from a
1480 * swap PTE which results in an invalid pointer.
1481 * Since only PAGE_SIZE pages can currently be
1482 * migrated, just set it to page. This will need to be
1483 * changed when hugepage migrations to device private
1484 * memory are supported.
1478 */ 1485 */
1486 subpage = page;
1479 goto discard; 1487 goto discard;
1480 } 1488 }
1481 1489
diff --git a/mm/shmem.c b/mm/shmem.c
index 626d8c74b973..2bed4761f279 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -1466,7 +1466,7 @@ static struct page *shmem_alloc_hugepage(gfp_t gfp,
1466 1466
1467 shmem_pseudo_vma_init(&pvma, info, hindex); 1467 shmem_pseudo_vma_init(&pvma, info, hindex);
1468 page = alloc_pages_vma(gfp | __GFP_COMP | __GFP_NORETRY | __GFP_NOWARN, 1468 page = alloc_pages_vma(gfp | __GFP_COMP | __GFP_NORETRY | __GFP_NOWARN,
1469 HPAGE_PMD_ORDER, &pvma, 0, numa_node_id(), true); 1469 HPAGE_PMD_ORDER, &pvma, 0, numa_node_id());
1470 shmem_pseudo_vma_destroy(&pvma); 1470 shmem_pseudo_vma_destroy(&pvma);
1471 if (page) 1471 if (page)
1472 prep_transhuge_page(page); 1472 prep_transhuge_page(page);
diff --git a/mm/slub.c b/mm/slub.c
index e6c030e47364..8834563cdb4b 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1432,7 +1432,9 @@ static inline bool slab_free_freelist_hook(struct kmem_cache *s,
1432 void *old_tail = *tail ? *tail : *head; 1432 void *old_tail = *tail ? *tail : *head;
1433 int rsize; 1433 int rsize;
1434 1434
1435 if (slab_want_init_on_free(s)) 1435 if (slab_want_init_on_free(s)) {
1436 void *p = NULL;
1437
1436 do { 1438 do {
1437 object = next; 1439 object = next;
1438 next = get_freepointer(s, object); 1440 next = get_freepointer(s, object);
@@ -1445,8 +1447,10 @@ static inline bool slab_free_freelist_hook(struct kmem_cache *s,
1445 : 0; 1447 : 0;
1446 memset((char *)object + s->inuse, 0, 1448 memset((char *)object + s->inuse, 0,
1447 s->size - s->inuse - rsize); 1449 s->size - s->inuse - rsize);
1448 set_freepointer(s, object, next); 1450 set_freepointer(s, object, p);
1451 p = object;
1449 } while (object != old_tail); 1452 } while (object != old_tail);
1453 }
1450 1454
1451/* 1455/*
1452 * Compiler cannot detect this function can be removed if slab_free_hook() 1456 * Compiler cannot detect this function can be removed if slab_free_hook()
diff --git a/mm/usercopy.c b/mm/usercopy.c
index 2a09796edef8..98e924864554 100644
--- a/mm/usercopy.c
+++ b/mm/usercopy.c
@@ -147,7 +147,7 @@ static inline void check_bogus_address(const unsigned long ptr, unsigned long n,
147 bool to_user) 147 bool to_user)
148{ 148{
149 /* Reject if object wraps past end of memory. */ 149 /* Reject if object wraps past end of memory. */
150 if (ptr + n < ptr) 150 if (ptr + (n - 1) < ptr)
151 usercopy_abort("wrapped address", NULL, to_user, 0, ptr + n); 151 usercopy_abort("wrapped address", NULL, to_user, 0, ptr + n);
152 152
153 /* Reject if NULL or ZERO-allocation. */ 153 /* Reject if NULL or ZERO-allocation. */
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index 4fa8d84599b0..7ba11e12a11f 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -1259,6 +1259,12 @@ static bool __purge_vmap_area_lazy(unsigned long start, unsigned long end)
1259 return false; 1259 return false;
1260 1260
1261 /* 1261 /*
1262 * First make sure the mappings are removed from all page-tables
1263 * before they are freed.
1264 */
1265 vmalloc_sync_all();
1266
1267 /*
1262 * TODO: to calculate a flush range without looping. 1268 * TODO: to calculate a flush range without looping.
1263 * The list can be up to lazy_max_pages() elements. 1269 * The list can be up to lazy_max_pages() elements.
1264 */ 1270 */
@@ -3038,6 +3044,9 @@ EXPORT_SYMBOL(remap_vmalloc_range);
3038/* 3044/*
3039 * Implement a stub for vmalloc_sync_all() if the architecture chose not to 3045 * Implement a stub for vmalloc_sync_all() if the architecture chose not to
3040 * have one. 3046 * have one.
3047 *
3048 * The purpose of this function is to make sure the vmalloc area
3049 * mappings are identical in all page-tables in the system.
3041 */ 3050 */
3042void __weak vmalloc_sync_all(void) 3051void __weak vmalloc_sync_all(void)
3043{ 3052{
@@ -3270,9 +3279,19 @@ retry:
3270 goto overflow; 3279 goto overflow;
3271 3280
3272 /* 3281 /*
3282 * If required width exeeds current VA block, move
3283 * base downwards and then recheck.
3284 */
3285 if (base + end > va->va_end) {
3286 base = pvm_determine_end_from_reverse(&va, align) - end;
3287 term_area = area;
3288 continue;
3289 }
3290
3291 /*
3273 * If this VA does not fit, move base downwards and recheck. 3292 * If this VA does not fit, move base downwards and recheck.
3274 */ 3293 */
3275 if (base + start < va->va_start || base + end > va->va_end) { 3294 if (base + start < va->va_start) {
3276 va = node_to_va(rb_prev(&va->rb_node)); 3295 va = node_to_va(rb_prev(&va->rb_node));
3277 base = pvm_determine_end_from_reverse(&va, align) - end; 3296 base = pvm_determine_end_from_reverse(&va, align) - end;
3278 term_area = area; 3297 term_area = area;
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 44df66a98f2a..a6c5d0b28321 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -88,9 +88,6 @@ struct scan_control {
88 /* Can pages be swapped as part of reclaim? */ 88 /* Can pages be swapped as part of reclaim? */
89 unsigned int may_swap:1; 89 unsigned int may_swap:1;
90 90
91 /* e.g. boosted watermark reclaim leaves slabs alone */
92 unsigned int may_shrinkslab:1;
93
94 /* 91 /*
95 * Cgroups are not reclaimed below their configured memory.low, 92 * Cgroups are not reclaimed below their configured memory.low,
96 * unless we threaten to OOM. If any cgroups are skipped due to 93 * unless we threaten to OOM. If any cgroups are skipped due to
@@ -699,7 +696,14 @@ static unsigned long shrink_slab(gfp_t gfp_mask, int nid,
699 unsigned long ret, freed = 0; 696 unsigned long ret, freed = 0;
700 struct shrinker *shrinker; 697 struct shrinker *shrinker;
701 698
702 if (!mem_cgroup_is_root(memcg)) 699 /*
700 * The root memcg might be allocated even though memcg is disabled
701 * via "cgroup_disable=memory" boot parameter. This could make
702 * mem_cgroup_is_root() return false, then just run memcg slab
703 * shrink, but skip global shrink. This may result in premature
704 * oom.
705 */
706 if (!mem_cgroup_disabled() && !mem_cgroup_is_root(memcg))
703 return shrink_slab_memcg(gfp_mask, nid, memcg, priority); 707 return shrink_slab_memcg(gfp_mask, nid, memcg, priority);
704 708
705 if (!down_read_trylock(&shrinker_rwsem)) 709 if (!down_read_trylock(&shrinker_rwsem))
@@ -2707,10 +2711,8 @@ static bool shrink_node(pg_data_t *pgdat, struct scan_control *sc)
2707 shrink_node_memcg(pgdat, memcg, sc, &lru_pages); 2711 shrink_node_memcg(pgdat, memcg, sc, &lru_pages);
2708 node_lru_pages += lru_pages; 2712 node_lru_pages += lru_pages;
2709 2713
2710 if (sc->may_shrinkslab) { 2714 shrink_slab(sc->gfp_mask, pgdat->node_id, memcg,
2711 shrink_slab(sc->gfp_mask, pgdat->node_id, 2715 sc->priority);
2712 memcg, sc->priority);
2713 }
2714 2716
2715 /* Record the group's reclaim efficiency */ 2717 /* Record the group's reclaim efficiency */
2716 vmpressure(sc->gfp_mask, memcg, false, 2718 vmpressure(sc->gfp_mask, memcg, false,
@@ -3187,7 +3189,6 @@ unsigned long try_to_free_pages(struct zonelist *zonelist, int order,
3187 .may_writepage = !laptop_mode, 3189 .may_writepage = !laptop_mode,
3188 .may_unmap = 1, 3190 .may_unmap = 1,
3189 .may_swap = 1, 3191 .may_swap = 1,
3190 .may_shrinkslab = 1,
3191 }; 3192 };
3192 3193
3193 /* 3194 /*
@@ -3219,6 +3220,7 @@ unsigned long try_to_free_pages(struct zonelist *zonelist, int order,
3219 3220
3220#ifdef CONFIG_MEMCG 3221#ifdef CONFIG_MEMCG
3221 3222
3223/* Only used by soft limit reclaim. Do not reuse for anything else. */
3222unsigned long mem_cgroup_shrink_node(struct mem_cgroup *memcg, 3224unsigned long mem_cgroup_shrink_node(struct mem_cgroup *memcg,
3223 gfp_t gfp_mask, bool noswap, 3225 gfp_t gfp_mask, bool noswap,
3224 pg_data_t *pgdat, 3226 pg_data_t *pgdat,
@@ -3231,11 +3233,11 @@ unsigned long mem_cgroup_shrink_node(struct mem_cgroup *memcg,
3231 .may_unmap = 1, 3233 .may_unmap = 1,
3232 .reclaim_idx = MAX_NR_ZONES - 1, 3234 .reclaim_idx = MAX_NR_ZONES - 1,
3233 .may_swap = !noswap, 3235 .may_swap = !noswap,
3234 .may_shrinkslab = 1,
3235 }; 3236 };
3236 unsigned long lru_pages; 3237 unsigned long lru_pages;
3237 3238
3238 set_task_reclaim_state(current, &sc.reclaim_state); 3239 WARN_ON_ONCE(!current->reclaim_state);
3240
3239 sc.gfp_mask = (gfp_mask & GFP_RECLAIM_MASK) | 3241 sc.gfp_mask = (gfp_mask & GFP_RECLAIM_MASK) |
3240 (GFP_HIGHUSER_MOVABLE & ~GFP_RECLAIM_MASK); 3242 (GFP_HIGHUSER_MOVABLE & ~GFP_RECLAIM_MASK);
3241 3243
@@ -3253,7 +3255,6 @@ unsigned long mem_cgroup_shrink_node(struct mem_cgroup *memcg,
3253 3255
3254 trace_mm_vmscan_memcg_softlimit_reclaim_end(sc.nr_reclaimed); 3256 trace_mm_vmscan_memcg_softlimit_reclaim_end(sc.nr_reclaimed);
3255 3257
3256 set_task_reclaim_state(current, NULL);
3257 *nr_scanned = sc.nr_scanned; 3258 *nr_scanned = sc.nr_scanned;
3258 3259
3259 return sc.nr_reclaimed; 3260 return sc.nr_reclaimed;
@@ -3279,7 +3280,6 @@ unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *memcg,
3279 .may_writepage = !laptop_mode, 3280 .may_writepage = !laptop_mode,
3280 .may_unmap = 1, 3281 .may_unmap = 1,
3281 .may_swap = may_swap, 3282 .may_swap = may_swap,
3282 .may_shrinkslab = 1,
3283 }; 3283 };
3284 3284
3285 set_task_reclaim_state(current, &sc.reclaim_state); 3285 set_task_reclaim_state(current, &sc.reclaim_state);
@@ -3591,7 +3591,6 @@ restart:
3591 */ 3591 */
3592 sc.may_writepage = !laptop_mode && !nr_boost_reclaim; 3592 sc.may_writepage = !laptop_mode && !nr_boost_reclaim;
3593 sc.may_swap = !nr_boost_reclaim; 3593 sc.may_swap = !nr_boost_reclaim;
3594 sc.may_shrinkslab = !nr_boost_reclaim;
3595 3594
3596 /* 3595 /*
3597 * Do some background aging of the anon list, to give 3596 * Do some background aging of the anon list, to give
diff --git a/mm/workingset.c b/mm/workingset.c
index e0b4edcb88c8..c963831d354f 100644
--- a/mm/workingset.c
+++ b/mm/workingset.c
@@ -380,14 +380,12 @@ void workingset_update_node(struct xa_node *node)
380 if (node->count && node->count == node->nr_values) { 380 if (node->count && node->count == node->nr_values) {
381 if (list_empty(&node->private_list)) { 381 if (list_empty(&node->private_list)) {
382 list_lru_add(&shadow_nodes, &node->private_list); 382 list_lru_add(&shadow_nodes, &node->private_list);
383 __inc_lruvec_page_state(virt_to_page(node), 383 __inc_lruvec_slab_state(node, WORKINGSET_NODES);
384 WORKINGSET_NODES);
385 } 384 }
386 } else { 385 } else {
387 if (!list_empty(&node->private_list)) { 386 if (!list_empty(&node->private_list)) {
388 list_lru_del(&shadow_nodes, &node->private_list); 387 list_lru_del(&shadow_nodes, &node->private_list);
389 __dec_lruvec_page_state(virt_to_page(node), 388 __dec_lruvec_slab_state(node, WORKINGSET_NODES);
390 WORKINGSET_NODES);
391 } 389 }
392 } 390 }
393} 391}
@@ -480,7 +478,7 @@ static enum lru_status shadow_lru_isolate(struct list_head *item,
480 } 478 }
481 479
482 list_lru_isolate(lru, item); 480 list_lru_isolate(lru, item);
483 __dec_lruvec_page_state(virt_to_page(node), WORKINGSET_NODES); 481 __dec_lruvec_slab_state(node, WORKINGSET_NODES);
484 482
485 spin_unlock(lru_lock); 483 spin_unlock(lru_lock);
486 484
@@ -503,7 +501,7 @@ static enum lru_status shadow_lru_isolate(struct list_head *item,
503 * shadow entries we were tracking ... 501 * shadow entries we were tracking ...
504 */ 502 */
505 xas_store(&xas, NULL); 503 xas_store(&xas, NULL);
506 __inc_lruvec_page_state(virt_to_page(node), WORKINGSET_NODERECLAIM); 504 __inc_lruvec_slab_state(node, WORKINGSET_NODERECLAIM);
507 505
508out_invalid: 506out_invalid:
509 xa_unlock_irq(&mapping->i_pages); 507 xa_unlock_irq(&mapping->i_pages);
diff --git a/mm/z3fold.c b/mm/z3fold.c
index 1a029a7432ee..75b7962439ff 100644
--- a/mm/z3fold.c
+++ b/mm/z3fold.c
@@ -41,6 +41,7 @@
41#include <linux/workqueue.h> 41#include <linux/workqueue.h>
42#include <linux/slab.h> 42#include <linux/slab.h>
43#include <linux/spinlock.h> 43#include <linux/spinlock.h>
44#include <linux/wait.h>
44#include <linux/zpool.h> 45#include <linux/zpool.h>
45#include <linux/magic.h> 46#include <linux/magic.h>
46 47
@@ -145,6 +146,8 @@ struct z3fold_header {
145 * @release_wq: workqueue for safe page release 146 * @release_wq: workqueue for safe page release
146 * @work: work_struct for safe page release 147 * @work: work_struct for safe page release
147 * @inode: inode for z3fold pseudo filesystem 148 * @inode: inode for z3fold pseudo filesystem
149 * @destroying: bool to stop migration once we start destruction
150 * @isolated: int to count the number of pages currently in isolation
148 * 151 *
149 * This structure is allocated at pool creation time and maintains metadata 152 * This structure is allocated at pool creation time and maintains metadata
150 * pertaining to a particular z3fold pool. 153 * pertaining to a particular z3fold pool.
@@ -163,8 +166,11 @@ struct z3fold_pool {
163 const struct zpool_ops *zpool_ops; 166 const struct zpool_ops *zpool_ops;
164 struct workqueue_struct *compact_wq; 167 struct workqueue_struct *compact_wq;
165 struct workqueue_struct *release_wq; 168 struct workqueue_struct *release_wq;
169 struct wait_queue_head isolate_wait;
166 struct work_struct work; 170 struct work_struct work;
167 struct inode *inode; 171 struct inode *inode;
172 bool destroying;
173 int isolated;
168}; 174};
169 175
170/* 176/*
@@ -769,6 +775,7 @@ static struct z3fold_pool *z3fold_create_pool(const char *name, gfp_t gfp,
769 goto out_c; 775 goto out_c;
770 spin_lock_init(&pool->lock); 776 spin_lock_init(&pool->lock);
771 spin_lock_init(&pool->stale_lock); 777 spin_lock_init(&pool->stale_lock);
778 init_waitqueue_head(&pool->isolate_wait);
772 pool->unbuddied = __alloc_percpu(sizeof(struct list_head)*NCHUNKS, 2); 779 pool->unbuddied = __alloc_percpu(sizeof(struct list_head)*NCHUNKS, 2);
773 if (!pool->unbuddied) 780 if (!pool->unbuddied)
774 goto out_pool; 781 goto out_pool;
@@ -808,6 +815,15 @@ out:
808 return NULL; 815 return NULL;
809} 816}
810 817
818static bool pool_isolated_are_drained(struct z3fold_pool *pool)
819{
820 bool ret;
821
822 spin_lock(&pool->lock);
823 ret = pool->isolated == 0;
824 spin_unlock(&pool->lock);
825 return ret;
826}
811/** 827/**
812 * z3fold_destroy_pool() - destroys an existing z3fold pool 828 * z3fold_destroy_pool() - destroys an existing z3fold pool
813 * @pool: the z3fold pool to be destroyed 829 * @pool: the z3fold pool to be destroyed
@@ -817,9 +833,35 @@ out:
817static void z3fold_destroy_pool(struct z3fold_pool *pool) 833static void z3fold_destroy_pool(struct z3fold_pool *pool)
818{ 834{
819 kmem_cache_destroy(pool->c_handle); 835 kmem_cache_destroy(pool->c_handle);
820 z3fold_unregister_migration(pool); 836 /*
821 destroy_workqueue(pool->release_wq); 837 * We set pool-> destroying under lock to ensure that
838 * z3fold_page_isolate() sees any changes to destroying. This way we
839 * avoid the need for any memory barriers.
840 */
841
842 spin_lock(&pool->lock);
843 pool->destroying = true;
844 spin_unlock(&pool->lock);
845
846 /*
847 * We need to ensure that no pages are being migrated while we destroy
848 * these workqueues, as migration can queue work on either of the
849 * workqueues.
850 */
851 wait_event(pool->isolate_wait, !pool_isolated_are_drained(pool));
852
853 /*
854 * We need to destroy pool->compact_wq before pool->release_wq,
855 * as any pending work on pool->compact_wq will call
856 * queue_work(pool->release_wq, &pool->work).
857 *
858 * There are still outstanding pages until both workqueues are drained,
859 * so we cannot unregister migration until then.
860 */
861
822 destroy_workqueue(pool->compact_wq); 862 destroy_workqueue(pool->compact_wq);
863 destroy_workqueue(pool->release_wq);
864 z3fold_unregister_migration(pool);
823 kfree(pool); 865 kfree(pool);
824} 866}
825 867
@@ -1297,6 +1339,28 @@ static u64 z3fold_get_pool_size(struct z3fold_pool *pool)
1297 return atomic64_read(&pool->pages_nr); 1339 return atomic64_read(&pool->pages_nr);
1298} 1340}
1299 1341
1342/*
1343 * z3fold_dec_isolated() expects to be called while pool->lock is held.
1344 */
1345static void z3fold_dec_isolated(struct z3fold_pool *pool)
1346{
1347 assert_spin_locked(&pool->lock);
1348 VM_BUG_ON(pool->isolated <= 0);
1349 pool->isolated--;
1350
1351 /*
1352 * If we have no more isolated pages, we have to see if
1353 * z3fold_destroy_pool() is waiting for a signal.
1354 */
1355 if (pool->isolated == 0 && waitqueue_active(&pool->isolate_wait))
1356 wake_up_all(&pool->isolate_wait);
1357}
1358
1359static void z3fold_inc_isolated(struct z3fold_pool *pool)
1360{
1361 pool->isolated++;
1362}
1363
1300static bool z3fold_page_isolate(struct page *page, isolate_mode_t mode) 1364static bool z3fold_page_isolate(struct page *page, isolate_mode_t mode)
1301{ 1365{
1302 struct z3fold_header *zhdr; 1366 struct z3fold_header *zhdr;
@@ -1323,6 +1387,34 @@ static bool z3fold_page_isolate(struct page *page, isolate_mode_t mode)
1323 spin_lock(&pool->lock); 1387 spin_lock(&pool->lock);
1324 if (!list_empty(&page->lru)) 1388 if (!list_empty(&page->lru))
1325 list_del(&page->lru); 1389 list_del(&page->lru);
1390 /*
1391 * We need to check for destruction while holding pool->lock, as
1392 * otherwise destruction could see 0 isolated pages, and
1393 * proceed.
1394 */
1395 if (unlikely(pool->destroying)) {
1396 spin_unlock(&pool->lock);
1397 /*
1398 * If this page isn't stale, somebody else holds a
1399 * reference to it. Let't drop our refcount so that they
1400 * can call the release logic.
1401 */
1402 if (unlikely(kref_put(&zhdr->refcount,
1403 release_z3fold_page_locked))) {
1404 /*
1405 * If we get here we have kref problems, so we
1406 * should freak out.
1407 */
1408 WARN(1, "Z3fold is experiencing kref problems\n");
1409 z3fold_page_unlock(zhdr);
1410 return false;
1411 }
1412 z3fold_page_unlock(zhdr);
1413 return false;
1414 }
1415
1416
1417 z3fold_inc_isolated(pool);
1326 spin_unlock(&pool->lock); 1418 spin_unlock(&pool->lock);
1327 z3fold_page_unlock(zhdr); 1419 z3fold_page_unlock(zhdr);
1328 return true; 1420 return true;
@@ -1391,6 +1483,10 @@ static int z3fold_page_migrate(struct address_space *mapping, struct page *newpa
1391 1483
1392 queue_work_on(new_zhdr->cpu, pool->compact_wq, &new_zhdr->work); 1484 queue_work_on(new_zhdr->cpu, pool->compact_wq, &new_zhdr->work);
1393 1485
1486 spin_lock(&pool->lock);
1487 z3fold_dec_isolated(pool);
1488 spin_unlock(&pool->lock);
1489
1394 page_mapcount_reset(page); 1490 page_mapcount_reset(page);
1395 put_page(page); 1491 put_page(page);
1396 return 0; 1492 return 0;
@@ -1410,10 +1506,14 @@ static void z3fold_page_putback(struct page *page)
1410 INIT_LIST_HEAD(&page->lru); 1506 INIT_LIST_HEAD(&page->lru);
1411 if (kref_put(&zhdr->refcount, release_z3fold_page_locked)) { 1507 if (kref_put(&zhdr->refcount, release_z3fold_page_locked)) {
1412 atomic64_dec(&pool->pages_nr); 1508 atomic64_dec(&pool->pages_nr);
1509 spin_lock(&pool->lock);
1510 z3fold_dec_isolated(pool);
1511 spin_unlock(&pool->lock);
1413 return; 1512 return;
1414 } 1513 }
1415 spin_lock(&pool->lock); 1514 spin_lock(&pool->lock);
1416 list_add(&page->lru, &pool->lru); 1515 list_add(&page->lru, &pool->lru);
1516 z3fold_dec_isolated(pool);
1417 spin_unlock(&pool->lock); 1517 spin_unlock(&pool->lock);
1418 z3fold_page_unlock(zhdr); 1518 z3fold_page_unlock(zhdr);
1419} 1519}
diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c
index 57fbb7ced69f..e98bb6ab4f7e 100644
--- a/mm/zsmalloc.c
+++ b/mm/zsmalloc.c
@@ -54,6 +54,7 @@
54#include <linux/mount.h> 54#include <linux/mount.h>
55#include <linux/pseudo_fs.h> 55#include <linux/pseudo_fs.h>
56#include <linux/migrate.h> 56#include <linux/migrate.h>
57#include <linux/wait.h>
57#include <linux/pagemap.h> 58#include <linux/pagemap.h>
58#include <linux/fs.h> 59#include <linux/fs.h>
59 60
@@ -268,6 +269,10 @@ struct zs_pool {
268#ifdef CONFIG_COMPACTION 269#ifdef CONFIG_COMPACTION
269 struct inode *inode; 270 struct inode *inode;
270 struct work_struct free_work; 271 struct work_struct free_work;
272 /* A wait queue for when migration races with async_free_zspage() */
273 struct wait_queue_head migration_wait;
274 atomic_long_t isolated_pages;
275 bool destroying;
271#endif 276#endif
272}; 277};
273 278
@@ -1862,6 +1867,31 @@ static void dec_zspage_isolation(struct zspage *zspage)
1862 zspage->isolated--; 1867 zspage->isolated--;
1863} 1868}
1864 1869
1870static void putback_zspage_deferred(struct zs_pool *pool,
1871 struct size_class *class,
1872 struct zspage *zspage)
1873{
1874 enum fullness_group fg;
1875
1876 fg = putback_zspage(class, zspage);
1877 if (fg == ZS_EMPTY)
1878 schedule_work(&pool->free_work);
1879
1880}
1881
1882static inline void zs_pool_dec_isolated(struct zs_pool *pool)
1883{
1884 VM_BUG_ON(atomic_long_read(&pool->isolated_pages) <= 0);
1885 atomic_long_dec(&pool->isolated_pages);
1886 /*
1887 * There's no possibility of racing, since wait_for_isolated_drain()
1888 * checks the isolated count under &class->lock after enqueuing
1889 * on migration_wait.
1890 */
1891 if (atomic_long_read(&pool->isolated_pages) == 0 && pool->destroying)
1892 wake_up_all(&pool->migration_wait);
1893}
1894
1865static void replace_sub_page(struct size_class *class, struct zspage *zspage, 1895static void replace_sub_page(struct size_class *class, struct zspage *zspage,
1866 struct page *newpage, struct page *oldpage) 1896 struct page *newpage, struct page *oldpage)
1867{ 1897{
@@ -1931,6 +1961,7 @@ static bool zs_page_isolate(struct page *page, isolate_mode_t mode)
1931 */ 1961 */
1932 if (!list_empty(&zspage->list) && !is_zspage_isolated(zspage)) { 1962 if (!list_empty(&zspage->list) && !is_zspage_isolated(zspage)) {
1933 get_zspage_mapping(zspage, &class_idx, &fullness); 1963 get_zspage_mapping(zspage, &class_idx, &fullness);
1964 atomic_long_inc(&pool->isolated_pages);
1934 remove_zspage(class, zspage, fullness); 1965 remove_zspage(class, zspage, fullness);
1935 } 1966 }
1936 1967
@@ -2030,8 +2061,16 @@ static int zs_page_migrate(struct address_space *mapping, struct page *newpage,
2030 * Page migration is done so let's putback isolated zspage to 2061 * Page migration is done so let's putback isolated zspage to
2031 * the list if @page is final isolated subpage in the zspage. 2062 * the list if @page is final isolated subpage in the zspage.
2032 */ 2063 */
2033 if (!is_zspage_isolated(zspage)) 2064 if (!is_zspage_isolated(zspage)) {
2034 putback_zspage(class, zspage); 2065 /*
2066 * We cannot race with zs_destroy_pool() here because we wait
2067 * for isolation to hit zero before we start destroying.
2068 * Also, we ensure that everyone can see pool->destroying before
2069 * we start waiting.
2070 */
2071 putback_zspage_deferred(pool, class, zspage);
2072 zs_pool_dec_isolated(pool);
2073 }
2035 2074
2036 reset_page(page); 2075 reset_page(page);
2037 put_page(page); 2076 put_page(page);
@@ -2077,13 +2116,12 @@ static void zs_page_putback(struct page *page)
2077 spin_lock(&class->lock); 2116 spin_lock(&class->lock);
2078 dec_zspage_isolation(zspage); 2117 dec_zspage_isolation(zspage);
2079 if (!is_zspage_isolated(zspage)) { 2118 if (!is_zspage_isolated(zspage)) {
2080 fg = putback_zspage(class, zspage);
2081 /* 2119 /*
2082 * Due to page_lock, we cannot free zspage immediately 2120 * Due to page_lock, we cannot free zspage immediately
2083 * so let's defer. 2121 * so let's defer.
2084 */ 2122 */
2085 if (fg == ZS_EMPTY) 2123 putback_zspage_deferred(pool, class, zspage);
2086 schedule_work(&pool->free_work); 2124 zs_pool_dec_isolated(pool);
2087 } 2125 }
2088 spin_unlock(&class->lock); 2126 spin_unlock(&class->lock);
2089} 2127}
@@ -2107,8 +2145,36 @@ static int zs_register_migration(struct zs_pool *pool)
2107 return 0; 2145 return 0;
2108} 2146}
2109 2147
2148static bool pool_isolated_are_drained(struct zs_pool *pool)
2149{
2150 return atomic_long_read(&pool->isolated_pages) == 0;
2151}
2152
2153/* Function for resolving migration */
2154static void wait_for_isolated_drain(struct zs_pool *pool)
2155{
2156
2157 /*
2158 * We're in the process of destroying the pool, so there are no
2159 * active allocations. zs_page_isolate() fails for completely free
2160 * zspages, so we need only wait for the zs_pool's isolated
2161 * count to hit zero.
2162 */
2163 wait_event(pool->migration_wait,
2164 pool_isolated_are_drained(pool));
2165}
2166
2110static void zs_unregister_migration(struct zs_pool *pool) 2167static void zs_unregister_migration(struct zs_pool *pool)
2111{ 2168{
2169 pool->destroying = true;
2170 /*
2171 * We need a memory barrier here to ensure global visibility of
2172 * pool->destroying. Thus pool->isolated pages will either be 0 in which
2173 * case we don't care, or it will be > 0 and pool->destroying will
2174 * ensure that we wake up once isolation hits 0.
2175 */
2176 smp_mb();
2177 wait_for_isolated_drain(pool); /* This can block */
2112 flush_work(&pool->free_work); 2178 flush_work(&pool->free_work);
2113 iput(pool->inode); 2179 iput(pool->inode);
2114} 2180}
@@ -2346,6 +2412,10 @@ struct zs_pool *zs_create_pool(const char *name)
2346 if (!pool->name) 2412 if (!pool->name)
2347 goto err; 2413 goto err;
2348 2414
2415#ifdef CONFIG_COMPACTION
2416 init_waitqueue_head(&pool->migration_wait);
2417#endif
2418
2349 if (create_cache(pool)) 2419 if (create_cache(pool))
2350 goto err; 2420 goto err;
2351 2421
diff --git a/net/batman-adv/bat_iv_ogm.c b/net/batman-adv/bat_iv_ogm.c
index 240ed70912d6..d78938e3e008 100644
--- a/net/batman-adv/bat_iv_ogm.c
+++ b/net/batman-adv/bat_iv_ogm.c
@@ -277,17 +277,23 @@ static u8 batadv_hop_penalty(u8 tq, const struct batadv_priv *bat_priv)
277 * batadv_iv_ogm_aggr_packet() - checks if there is another OGM attached 277 * batadv_iv_ogm_aggr_packet() - checks if there is another OGM attached
278 * @buff_pos: current position in the skb 278 * @buff_pos: current position in the skb
279 * @packet_len: total length of the skb 279 * @packet_len: total length of the skb
280 * @tvlv_len: tvlv length of the previously considered OGM 280 * @ogm_packet: potential OGM in buffer
281 * 281 *
282 * Return: true if there is enough space for another OGM, false otherwise. 282 * Return: true if there is enough space for another OGM, false otherwise.
283 */ 283 */
284static bool batadv_iv_ogm_aggr_packet(int buff_pos, int packet_len, 284static bool
285 __be16 tvlv_len) 285batadv_iv_ogm_aggr_packet(int buff_pos, int packet_len,
286 const struct batadv_ogm_packet *ogm_packet)
286{ 287{
287 int next_buff_pos = 0; 288 int next_buff_pos = 0;
288 289
289 next_buff_pos += buff_pos + BATADV_OGM_HLEN; 290 /* check if there is enough space for the header */
290 next_buff_pos += ntohs(tvlv_len); 291 next_buff_pos += buff_pos + sizeof(*ogm_packet);
292 if (next_buff_pos > packet_len)
293 return false;
294
295 /* check if there is enough space for the optional TVLV */
296 next_buff_pos += ntohs(ogm_packet->tvlv_len);
291 297
292 return (next_buff_pos <= packet_len) && 298 return (next_buff_pos <= packet_len) &&
293 (next_buff_pos <= BATADV_MAX_AGGREGATION_BYTES); 299 (next_buff_pos <= BATADV_MAX_AGGREGATION_BYTES);
@@ -315,7 +321,7 @@ static void batadv_iv_ogm_send_to_if(struct batadv_forw_packet *forw_packet,
315 321
316 /* adjust all flags and log packets */ 322 /* adjust all flags and log packets */
317 while (batadv_iv_ogm_aggr_packet(buff_pos, forw_packet->packet_len, 323 while (batadv_iv_ogm_aggr_packet(buff_pos, forw_packet->packet_len,
318 batadv_ogm_packet->tvlv_len)) { 324 batadv_ogm_packet)) {
319 /* we might have aggregated direct link packets with an 325 /* we might have aggregated direct link packets with an
320 * ordinary base packet 326 * ordinary base packet
321 */ 327 */
@@ -1704,7 +1710,7 @@ static int batadv_iv_ogm_receive(struct sk_buff *skb,
1704 1710
1705 /* unpack the aggregated packets and process them one by one */ 1711 /* unpack the aggregated packets and process them one by one */
1706 while (batadv_iv_ogm_aggr_packet(ogm_offset, skb_headlen(skb), 1712 while (batadv_iv_ogm_aggr_packet(ogm_offset, skb_headlen(skb),
1707 ogm_packet->tvlv_len)) { 1713 ogm_packet)) {
1708 batadv_iv_ogm_process(skb, ogm_offset, if_incoming); 1714 batadv_iv_ogm_process(skb, ogm_offset, if_incoming);
1709 1715
1710 ogm_offset += BATADV_OGM_HLEN; 1716 ogm_offset += BATADV_OGM_HLEN;
diff --git a/net/batman-adv/bat_v_ogm.c b/net/batman-adv/bat_v_ogm.c
index fad95ef64e01..bc06e3cdfa84 100644
--- a/net/batman-adv/bat_v_ogm.c
+++ b/net/batman-adv/bat_v_ogm.c
@@ -631,17 +631,23 @@ batadv_v_ogm_process_per_outif(struct batadv_priv *bat_priv,
631 * batadv_v_ogm_aggr_packet() - checks if there is another OGM aggregated 631 * batadv_v_ogm_aggr_packet() - checks if there is another OGM aggregated
632 * @buff_pos: current position in the skb 632 * @buff_pos: current position in the skb
633 * @packet_len: total length of the skb 633 * @packet_len: total length of the skb
634 * @tvlv_len: tvlv length of the previously considered OGM 634 * @ogm2_packet: potential OGM2 in buffer
635 * 635 *
636 * Return: true if there is enough space for another OGM, false otherwise. 636 * Return: true if there is enough space for another OGM, false otherwise.
637 */ 637 */
638static bool batadv_v_ogm_aggr_packet(int buff_pos, int packet_len, 638static bool
639 __be16 tvlv_len) 639batadv_v_ogm_aggr_packet(int buff_pos, int packet_len,
640 const struct batadv_ogm2_packet *ogm2_packet)
640{ 641{
641 int next_buff_pos = 0; 642 int next_buff_pos = 0;
642 643
643 next_buff_pos += buff_pos + BATADV_OGM2_HLEN; 644 /* check if there is enough space for the header */
644 next_buff_pos += ntohs(tvlv_len); 645 next_buff_pos += buff_pos + sizeof(*ogm2_packet);
646 if (next_buff_pos > packet_len)
647 return false;
648
649 /* check if there is enough space for the optional TVLV */
650 next_buff_pos += ntohs(ogm2_packet->tvlv_len);
645 651
646 return (next_buff_pos <= packet_len) && 652 return (next_buff_pos <= packet_len) &&
647 (next_buff_pos <= BATADV_MAX_AGGREGATION_BYTES); 653 (next_buff_pos <= BATADV_MAX_AGGREGATION_BYTES);
@@ -818,7 +824,7 @@ int batadv_v_ogm_packet_recv(struct sk_buff *skb,
818 ogm_packet = (struct batadv_ogm2_packet *)skb->data; 824 ogm_packet = (struct batadv_ogm2_packet *)skb->data;
819 825
820 while (batadv_v_ogm_aggr_packet(ogm_offset, skb_headlen(skb), 826 while (batadv_v_ogm_aggr_packet(ogm_offset, skb_headlen(skb),
821 ogm_packet->tvlv_len)) { 827 ogm_packet)) {
822 batadv_v_ogm_process(skb, ogm_offset, if_incoming); 828 batadv_v_ogm_process(skb, ogm_offset, if_incoming);
823 829
824 ogm_offset += BATADV_OGM2_HLEN; 830 ogm_offset += BATADV_OGM2_HLEN;
diff --git a/net/batman-adv/multicast.c b/net/batman-adv/multicast.c
index 67d7f83009ae..1d5bdf3a4b65 100644
--- a/net/batman-adv/multicast.c
+++ b/net/batman-adv/multicast.c
@@ -2303,7 +2303,7 @@ __batadv_mcast_flags_dump(struct sk_buff *msg, u32 portid,
2303 2303
2304 while (bucket_tmp < hash->size) { 2304 while (bucket_tmp < hash->size) {
2305 if (batadv_mcast_flags_dump_bucket(msg, portid, cb, hash, 2305 if (batadv_mcast_flags_dump_bucket(msg, portid, cb, hash,
2306 *bucket, &idx_tmp)) 2306 bucket_tmp, &idx_tmp))
2307 break; 2307 break;
2308 2308
2309 bucket_tmp++; 2309 bucket_tmp++;
@@ -2420,8 +2420,10 @@ void batadv_mcast_purge_orig(struct batadv_orig_node *orig)
2420 batadv_mcast_want_unsnoop_update(bat_priv, orig, BATADV_NO_FLAGS); 2420 batadv_mcast_want_unsnoop_update(bat_priv, orig, BATADV_NO_FLAGS);
2421 batadv_mcast_want_ipv4_update(bat_priv, orig, BATADV_NO_FLAGS); 2421 batadv_mcast_want_ipv4_update(bat_priv, orig, BATADV_NO_FLAGS);
2422 batadv_mcast_want_ipv6_update(bat_priv, orig, BATADV_NO_FLAGS); 2422 batadv_mcast_want_ipv6_update(bat_priv, orig, BATADV_NO_FLAGS);
2423 batadv_mcast_want_rtr4_update(bat_priv, orig, BATADV_NO_FLAGS); 2423 batadv_mcast_want_rtr4_update(bat_priv, orig,
2424 batadv_mcast_want_rtr6_update(bat_priv, orig, BATADV_NO_FLAGS); 2424 BATADV_MCAST_WANT_NO_RTR4);
2425 batadv_mcast_want_rtr6_update(bat_priv, orig,
2426 BATADV_MCAST_WANT_NO_RTR6);
2425 2427
2426 spin_unlock_bh(&orig->mcast_handler_lock); 2428 spin_unlock_bh(&orig->mcast_handler_lock);
2427} 2429}
diff --git a/net/batman-adv/netlink.c b/net/batman-adv/netlink.c
index 6f08fd122a8d..7e052d6f759b 100644
--- a/net/batman-adv/netlink.c
+++ b/net/batman-adv/netlink.c
@@ -164,7 +164,7 @@ batadv_netlink_get_ifindex(const struct nlmsghdr *nlh, int attrtype)
164{ 164{
165 struct nlattr *attr = nlmsg_find_attr(nlh, GENL_HDRLEN, attrtype); 165 struct nlattr *attr = nlmsg_find_attr(nlh, GENL_HDRLEN, attrtype);
166 166
167 return attr ? nla_get_u32(attr) : 0; 167 return (attr && nla_len(attr) == sizeof(u32)) ? nla_get_u32(attr) : 0;
168} 168}
169 169
170/** 170/**
diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c
index b9585e7d9d2e..04bc79359a17 100644
--- a/net/bluetooth/hci_core.c
+++ b/net/bluetooth/hci_core.c
@@ -3202,6 +3202,7 @@ struct hci_dev *hci_alloc_dev(void)
3202 hdev->conn_info_min_age = DEFAULT_CONN_INFO_MIN_AGE; 3202 hdev->conn_info_min_age = DEFAULT_CONN_INFO_MIN_AGE;
3203 hdev->conn_info_max_age = DEFAULT_CONN_INFO_MAX_AGE; 3203 hdev->conn_info_max_age = DEFAULT_CONN_INFO_MAX_AGE;
3204 hdev->auth_payload_timeout = DEFAULT_AUTH_PAYLOAD_TIMEOUT; 3204 hdev->auth_payload_timeout = DEFAULT_AUTH_PAYLOAD_TIMEOUT;
3205 hdev->min_enc_key_size = HCI_MIN_ENC_KEY_SIZE;
3205 3206
3206 mutex_init(&hdev->lock); 3207 mutex_init(&hdev->lock);
3207 mutex_init(&hdev->req_lock); 3208 mutex_init(&hdev->req_lock);
diff --git a/net/bluetooth/hci_debugfs.c b/net/bluetooth/hci_debugfs.c
index bb67f4a5479a..402e2cc54044 100644
--- a/net/bluetooth/hci_debugfs.c
+++ b/net/bluetooth/hci_debugfs.c
@@ -433,6 +433,35 @@ static int auto_accept_delay_set(void *data, u64 val)
433 return 0; 433 return 0;
434} 434}
435 435
436static int min_encrypt_key_size_set(void *data, u64 val)
437{
438 struct hci_dev *hdev = data;
439
440 if (val < 1 || val > 16)
441 return -EINVAL;
442
443 hci_dev_lock(hdev);
444 hdev->min_enc_key_size = val;
445 hci_dev_unlock(hdev);
446
447 return 0;
448}
449
450static int min_encrypt_key_size_get(void *data, u64 *val)
451{
452 struct hci_dev *hdev = data;
453
454 hci_dev_lock(hdev);
455 *val = hdev->min_enc_key_size;
456 hci_dev_unlock(hdev);
457
458 return 0;
459}
460
461DEFINE_SIMPLE_ATTRIBUTE(min_encrypt_key_size_fops,
462 min_encrypt_key_size_get,
463 min_encrypt_key_size_set, "%llu\n");
464
436static int auto_accept_delay_get(void *data, u64 *val) 465static int auto_accept_delay_get(void *data, u64 *val)
437{ 466{
438 struct hci_dev *hdev = data; 467 struct hci_dev *hdev = data;
@@ -545,6 +574,8 @@ void hci_debugfs_create_bredr(struct hci_dev *hdev)
545 if (lmp_ssp_capable(hdev)) { 574 if (lmp_ssp_capable(hdev)) {
546 debugfs_create_file("ssp_debug_mode", 0444, hdev->debugfs, 575 debugfs_create_file("ssp_debug_mode", 0444, hdev->debugfs,
547 hdev, &ssp_debug_mode_fops); 576 hdev, &ssp_debug_mode_fops);
577 debugfs_create_file("min_encrypt_key_size", 0644, hdev->debugfs,
578 hdev, &min_encrypt_key_size_fops);
548 debugfs_create_file("auto_accept_delay", 0644, hdev->debugfs, 579 debugfs_create_file("auto_accept_delay", 0644, hdev->debugfs,
549 hdev, &auto_accept_delay_fops); 580 hdev, &auto_accept_delay_fops);
550 } 581 }
diff --git a/net/bluetooth/hidp/core.c b/net/bluetooth/hidp/core.c
index 5abd423b55fa..8d889969ae7e 100644
--- a/net/bluetooth/hidp/core.c
+++ b/net/bluetooth/hidp/core.c
@@ -101,6 +101,7 @@ static int hidp_send_message(struct hidp_session *session, struct socket *sock,
101{ 101{
102 struct sk_buff *skb; 102 struct sk_buff *skb;
103 struct sock *sk = sock->sk; 103 struct sock *sk = sock->sk;
104 int ret;
104 105
105 BT_DBG("session %p data %p size %d", session, data, size); 106 BT_DBG("session %p data %p size %d", session, data, size);
106 107
@@ -114,13 +115,17 @@ static int hidp_send_message(struct hidp_session *session, struct socket *sock,
114 } 115 }
115 116
116 skb_put_u8(skb, hdr); 117 skb_put_u8(skb, hdr);
117 if (data && size > 0) 118 if (data && size > 0) {
118 skb_put_data(skb, data, size); 119 skb_put_data(skb, data, size);
120 ret = size;
121 } else {
122 ret = 0;
123 }
119 124
120 skb_queue_tail(transmit, skb); 125 skb_queue_tail(transmit, skb);
121 wake_up_interruptible(sk_sleep(sk)); 126 wake_up_interruptible(sk_sleep(sk));
122 127
123 return 0; 128 return ret;
124} 129}
125 130
126static int hidp_send_ctrl_message(struct hidp_session *session, 131static int hidp_send_ctrl_message(struct hidp_session *session,
diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
index cc506fe99b4d..dfc1edb168b7 100644
--- a/net/bluetooth/l2cap_core.c
+++ b/net/bluetooth/l2cap_core.c
@@ -1361,7 +1361,7 @@ static bool l2cap_check_enc_key_size(struct hci_conn *hcon)
1361 * actually encrypted before enforcing a key size. 1361 * actually encrypted before enforcing a key size.
1362 */ 1362 */
1363 return (!test_bit(HCI_CONN_ENCRYPT, &hcon->flags) || 1363 return (!test_bit(HCI_CONN_ENCRYPT, &hcon->flags) ||
1364 hcon->enc_key_size >= HCI_MIN_ENC_KEY_SIZE); 1364 hcon->enc_key_size >= hcon->hdev->min_enc_key_size);
1365} 1365}
1366 1366
1367static void l2cap_do_start(struct l2cap_chan *chan) 1367static void l2cap_do_start(struct l2cap_chan *chan)
diff --git a/net/bridge/br.c b/net/bridge/br.c
index d164f63a4345..8a8f9e5f264f 100644
--- a/net/bridge/br.c
+++ b/net/bridge/br.c
@@ -37,12 +37,15 @@ static int br_device_event(struct notifier_block *unused, unsigned long event, v
37 int err; 37 int err;
38 38
39 if (dev->priv_flags & IFF_EBRIDGE) { 39 if (dev->priv_flags & IFF_EBRIDGE) {
40 err = br_vlan_bridge_event(dev, event, ptr);
41 if (err)
42 return notifier_from_errno(err);
43
40 if (event == NETDEV_REGISTER) { 44 if (event == NETDEV_REGISTER) {
41 /* register of bridge completed, add sysfs entries */ 45 /* register of bridge completed, add sysfs entries */
42 br_sysfs_addbr(dev); 46 br_sysfs_addbr(dev);
43 return NOTIFY_DONE; 47 return NOTIFY_DONE;
44 } 48 }
45 br_vlan_bridge_event(dev, event, ptr);
46 } 49 }
47 50
48 /* not a port of a bridge */ 51 /* not a port of a bridge */
diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c
index 3d8deac2353d..f8cac3702712 100644
--- a/net/bridge/br_multicast.c
+++ b/net/bridge/br_multicast.c
@@ -1388,6 +1388,9 @@ br_multicast_leave_group(struct net_bridge *br,
1388 if (!br_port_group_equal(p, port, src)) 1388 if (!br_port_group_equal(p, port, src))
1389 continue; 1389 continue;
1390 1390
1391 if (p->flags & MDB_PG_FLAGS_PERMANENT)
1392 break;
1393
1391 rcu_assign_pointer(*pp, p->next); 1394 rcu_assign_pointer(*pp, p->next);
1392 hlist_del_init(&p->mglist); 1395 hlist_del_init(&p->mglist);
1393 del_timer(&p->timer); 1396 del_timer(&p->timer);
diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h
index e8cf03b43b7d..646504db0220 100644
--- a/net/bridge/br_private.h
+++ b/net/bridge/br_private.h
@@ -894,8 +894,8 @@ int nbp_get_num_vlan_infos(struct net_bridge_port *p, u32 filter_mask);
894void br_vlan_get_stats(const struct net_bridge_vlan *v, 894void br_vlan_get_stats(const struct net_bridge_vlan *v,
895 struct br_vlan_stats *stats); 895 struct br_vlan_stats *stats);
896void br_vlan_port_event(struct net_bridge_port *p, unsigned long event); 896void br_vlan_port_event(struct net_bridge_port *p, unsigned long event);
897void br_vlan_bridge_event(struct net_device *dev, unsigned long event, 897int br_vlan_bridge_event(struct net_device *dev, unsigned long event,
898 void *ptr); 898 void *ptr);
899 899
900static inline struct net_bridge_vlan_group *br_vlan_group( 900static inline struct net_bridge_vlan_group *br_vlan_group(
901 const struct net_bridge *br) 901 const struct net_bridge *br)
@@ -1085,9 +1085,10 @@ static inline void br_vlan_port_event(struct net_bridge_port *p,
1085{ 1085{
1086} 1086}
1087 1087
1088static inline void br_vlan_bridge_event(struct net_device *dev, 1088static inline int br_vlan_bridge_event(struct net_device *dev,
1089 unsigned long event, void *ptr) 1089 unsigned long event, void *ptr)
1090{ 1090{
1091 return 0;
1091} 1092}
1092#endif 1093#endif
1093 1094
diff --git a/net/bridge/br_vlan.c b/net/bridge/br_vlan.c
index 021cc9f66804..f5b2aeebbfe9 100644
--- a/net/bridge/br_vlan.c
+++ b/net/bridge/br_vlan.c
@@ -1053,7 +1053,6 @@ int br_vlan_init(struct net_bridge *br)
1053{ 1053{
1054 struct net_bridge_vlan_group *vg; 1054 struct net_bridge_vlan_group *vg;
1055 int ret = -ENOMEM; 1055 int ret = -ENOMEM;
1056 bool changed;
1057 1056
1058 vg = kzalloc(sizeof(*vg), GFP_KERNEL); 1057 vg = kzalloc(sizeof(*vg), GFP_KERNEL);
1059 if (!vg) 1058 if (!vg)
@@ -1068,17 +1067,10 @@ int br_vlan_init(struct net_bridge *br)
1068 br->vlan_proto = htons(ETH_P_8021Q); 1067 br->vlan_proto = htons(ETH_P_8021Q);
1069 br->default_pvid = 1; 1068 br->default_pvid = 1;
1070 rcu_assign_pointer(br->vlgrp, vg); 1069 rcu_assign_pointer(br->vlgrp, vg);
1071 ret = br_vlan_add(br, 1,
1072 BRIDGE_VLAN_INFO_PVID | BRIDGE_VLAN_INFO_UNTAGGED |
1073 BRIDGE_VLAN_INFO_BRENTRY, &changed, NULL);
1074 if (ret)
1075 goto err_vlan_add;
1076 1070
1077out: 1071out:
1078 return ret; 1072 return ret;
1079 1073
1080err_vlan_add:
1081 vlan_tunnel_deinit(vg);
1082err_tunnel_init: 1074err_tunnel_init:
1083 rhashtable_destroy(&vg->vlan_hash); 1075 rhashtable_destroy(&vg->vlan_hash);
1084err_rhtbl: 1076err_rhtbl:
@@ -1464,13 +1456,23 @@ static void nbp_vlan_set_vlan_dev_state(struct net_bridge_port *p, u16 vid)
1464} 1456}
1465 1457
1466/* Must be protected by RTNL. */ 1458/* Must be protected by RTNL. */
1467void br_vlan_bridge_event(struct net_device *dev, unsigned long event, 1459int br_vlan_bridge_event(struct net_device *dev, unsigned long event, void *ptr)
1468 void *ptr)
1469{ 1460{
1470 struct netdev_notifier_changeupper_info *info; 1461 struct netdev_notifier_changeupper_info *info;
1471 struct net_bridge *br; 1462 struct net_bridge *br = netdev_priv(dev);
1463 bool changed;
1464 int ret = 0;
1472 1465
1473 switch (event) { 1466 switch (event) {
1467 case NETDEV_REGISTER:
1468 ret = br_vlan_add(br, br->default_pvid,
1469 BRIDGE_VLAN_INFO_PVID |
1470 BRIDGE_VLAN_INFO_UNTAGGED |
1471 BRIDGE_VLAN_INFO_BRENTRY, &changed, NULL);
1472 break;
1473 case NETDEV_UNREGISTER:
1474 br_vlan_delete(br, br->default_pvid);
1475 break;
1474 case NETDEV_CHANGEUPPER: 1476 case NETDEV_CHANGEUPPER:
1475 info = ptr; 1477 info = ptr;
1476 br_vlan_upper_change(dev, info->upper_dev, info->linking); 1478 br_vlan_upper_change(dev, info->upper_dev, info->linking);
@@ -1478,12 +1480,13 @@ void br_vlan_bridge_event(struct net_device *dev, unsigned long event,
1478 1480
1479 case NETDEV_CHANGE: 1481 case NETDEV_CHANGE:
1480 case NETDEV_UP: 1482 case NETDEV_UP:
1481 br = netdev_priv(dev);
1482 if (!br_opt_get(br, BROPT_VLAN_BRIDGE_BINDING)) 1483 if (!br_opt_get(br, BROPT_VLAN_BRIDGE_BINDING))
1483 return; 1484 break;
1484 br_vlan_link_state_change(dev, br); 1485 br_vlan_link_state_change(dev, br);
1485 break; 1486 break;
1486 } 1487 }
1488
1489 return ret;
1487} 1490}
1488 1491
1489/* Must be protected by RTNL. */ 1492/* Must be protected by RTNL. */
diff --git a/net/bridge/netfilter/Kconfig b/net/bridge/netfilter/Kconfig
index 154fa558bb90..5040fe43f4b4 100644
--- a/net/bridge/netfilter/Kconfig
+++ b/net/bridge/netfilter/Kconfig
@@ -6,7 +6,7 @@
6menuconfig NF_TABLES_BRIDGE 6menuconfig NF_TABLES_BRIDGE
7 depends on BRIDGE && NETFILTER && NF_TABLES 7 depends on BRIDGE && NETFILTER && NF_TABLES
8 select NETFILTER_FAMILY_BRIDGE 8 select NETFILTER_FAMILY_BRIDGE
9 bool "Ethernet Bridge nf_tables support" 9 tristate "Ethernet Bridge nf_tables support"
10 10
11if NF_TABLES_BRIDGE 11if NF_TABLES_BRIDGE
12 12
@@ -25,6 +25,8 @@ config NF_LOG_BRIDGE
25 tristate "Bridge packet logging" 25 tristate "Bridge packet logging"
26 select NF_LOG_COMMON 26 select NF_LOG_COMMON
27 27
28endif # NF_TABLES_BRIDGE
29
28config NF_CONNTRACK_BRIDGE 30config NF_CONNTRACK_BRIDGE
29 tristate "IPv4/IPV6 bridge connection tracking support" 31 tristate "IPv4/IPV6 bridge connection tracking support"
30 depends on NF_CONNTRACK 32 depends on NF_CONNTRACK
@@ -39,8 +41,6 @@ config NF_CONNTRACK_BRIDGE
39 41
40 To compile it as a module, choose M here. If unsure, say N. 42 To compile it as a module, choose M here. If unsure, say N.
41 43
42endif # NF_TABLES_BRIDGE
43
44menuconfig BRIDGE_NF_EBTABLES 44menuconfig BRIDGE_NF_EBTABLES
45 tristate "Ethernet Bridge tables (ebtables) support" 45 tristate "Ethernet Bridge tables (ebtables) support"
46 depends on BRIDGE && NETFILTER && NETFILTER_XTABLES 46 depends on BRIDGE && NETFILTER && NETFILTER_XTABLES
diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c
index 963dfdc14827..4096d8a74a2b 100644
--- a/net/bridge/netfilter/ebtables.c
+++ b/net/bridge/netfilter/ebtables.c
@@ -221,7 +221,7 @@ unsigned int ebt_do_table(struct sk_buff *skb,
221 return NF_DROP; 221 return NF_DROP;
222 } 222 }
223 223
224 ADD_COUNTER(*(counter_base + i), 1, skb->len); 224 ADD_COUNTER(*(counter_base + i), skb->len, 1);
225 225
226 /* these should only watch: not modify, nor tell us 226 /* these should only watch: not modify, nor tell us
227 * what to do with the packet 227 * what to do with the packet
@@ -959,8 +959,8 @@ static void get_counters(const struct ebt_counter *oldcounters,
959 continue; 959 continue;
960 counter_base = COUNTER_BASE(oldcounters, nentries, cpu); 960 counter_base = COUNTER_BASE(oldcounters, nentries, cpu);
961 for (i = 0; i < nentries; i++) 961 for (i = 0; i < nentries; i++)
962 ADD_COUNTER(counters[i], counter_base[i].pcnt, 962 ADD_COUNTER(counters[i], counter_base[i].bcnt,
963 counter_base[i].bcnt); 963 counter_base[i].pcnt);
964 } 964 }
965} 965}
966 966
@@ -1280,7 +1280,7 @@ static int do_update_counters(struct net *net, const char *name,
1280 1280
1281 /* we add to the counters of the first cpu */ 1281 /* we add to the counters of the first cpu */
1282 for (i = 0; i < num_counters; i++) 1282 for (i = 0; i < num_counters; i++)
1283 ADD_COUNTER(t->private->counters[i], tmp[i].pcnt, tmp[i].bcnt); 1283 ADD_COUNTER(t->private->counters[i], tmp[i].bcnt, tmp[i].pcnt);
1284 1284
1285 write_unlock_bh(&t->lock); 1285 write_unlock_bh(&t->lock);
1286 ret = 0; 1286 ret = 0;
@@ -1770,20 +1770,28 @@ static int compat_calc_entry(const struct ebt_entry *e,
1770 return 0; 1770 return 0;
1771} 1771}
1772 1772
1773static int ebt_compat_init_offsets(unsigned int number)
1774{
1775 if (number > INT_MAX)
1776 return -EINVAL;
1777
1778 /* also count the base chain policies */
1779 number += NF_BR_NUMHOOKS;
1780
1781 return xt_compat_init_offsets(NFPROTO_BRIDGE, number);
1782}
1773 1783
1774static int compat_table_info(const struct ebt_table_info *info, 1784static int compat_table_info(const struct ebt_table_info *info,
1775 struct compat_ebt_replace *newinfo) 1785 struct compat_ebt_replace *newinfo)
1776{ 1786{
1777 unsigned int size = info->entries_size; 1787 unsigned int size = info->entries_size;
1778 const void *entries = info->entries; 1788 const void *entries = info->entries;
1789 int ret;
1779 1790
1780 newinfo->entries_size = size; 1791 newinfo->entries_size = size;
1781 if (info->nentries) { 1792 ret = ebt_compat_init_offsets(info->nentries);
1782 int ret = xt_compat_init_offsets(NFPROTO_BRIDGE, 1793 if (ret)
1783 info->nentries); 1794 return ret;
1784 if (ret)
1785 return ret;
1786 }
1787 1795
1788 return EBT_ENTRY_ITERATE(entries, size, compat_calc_entry, info, 1796 return EBT_ENTRY_ITERATE(entries, size, compat_calc_entry, info,
1789 entries, newinfo); 1797 entries, newinfo);
@@ -2234,11 +2242,9 @@ static int compat_do_replace(struct net *net, void __user *user,
2234 2242
2235 xt_compat_lock(NFPROTO_BRIDGE); 2243 xt_compat_lock(NFPROTO_BRIDGE);
2236 2244
2237 if (tmp.nentries) { 2245 ret = ebt_compat_init_offsets(tmp.nentries);
2238 ret = xt_compat_init_offsets(NFPROTO_BRIDGE, tmp.nentries); 2246 if (ret < 0)
2239 if (ret < 0) 2247 goto out_unlock;
2240 goto out_unlock;
2241 }
2242 2248
2243 ret = compat_copy_entries(entries_tmp, tmp.entries_size, &state); 2249 ret = compat_copy_entries(entries_tmp, tmp.entries_size, &state);
2244 if (ret < 0) 2250 if (ret < 0)
@@ -2261,8 +2267,10 @@ static int compat_do_replace(struct net *net, void __user *user,
2261 state.buf_kern_len = size64; 2267 state.buf_kern_len = size64;
2262 2268
2263 ret = compat_copy_entries(entries_tmp, tmp.entries_size, &state); 2269 ret = compat_copy_entries(entries_tmp, tmp.entries_size, &state);
2264 if (WARN_ON(ret < 0)) 2270 if (WARN_ON(ret < 0)) {
2271 vfree(entries_tmp);
2265 goto out_unlock; 2272 goto out_unlock;
2273 }
2266 2274
2267 vfree(entries_tmp); 2275 vfree(entries_tmp);
2268 tmp.entries_size = size64; 2276 tmp.entries_size = size64;
diff --git a/net/bridge/netfilter/nft_meta_bridge.c b/net/bridge/netfilter/nft_meta_bridge.c
index bed66f536b34..7c9e92b2f806 100644
--- a/net/bridge/netfilter/nft_meta_bridge.c
+++ b/net/bridge/netfilter/nft_meta_bridge.c
@@ -30,13 +30,9 @@ static void nft_meta_bridge_get_eval(const struct nft_expr *expr,
30 switch (priv->key) { 30 switch (priv->key) {
31 case NFT_META_BRI_IIFNAME: 31 case NFT_META_BRI_IIFNAME:
32 br_dev = nft_meta_get_bridge(in); 32 br_dev = nft_meta_get_bridge(in);
33 if (!br_dev)
34 goto err;
35 break; 33 break;
36 case NFT_META_BRI_OIFNAME: 34 case NFT_META_BRI_OIFNAME:
37 br_dev = nft_meta_get_bridge(out); 35 br_dev = nft_meta_get_bridge(out);
38 if (!br_dev)
39 goto err;
40 break; 36 break;
41 case NFT_META_BRI_IIFPVID: { 37 case NFT_META_BRI_IIFPVID: {
42 u16 p_pvid; 38 u16 p_pvid;
@@ -57,17 +53,15 @@ static void nft_meta_bridge_get_eval(const struct nft_expr *expr,
57 goto err; 53 goto err;
58 54
59 br_vlan_get_proto(br_dev, &p_proto); 55 br_vlan_get_proto(br_dev, &p_proto);
60 nft_reg_store16(dest, p_proto); 56 nft_reg_store16(dest, htons(p_proto));
61 return; 57 return;
62 } 58 }
63 default: 59 default:
64 goto out; 60 return nft_meta_get_eval(expr, regs, pkt);
65 } 61 }
66 62
67 strncpy((char *)dest, br_dev->name, IFNAMSIZ); 63 strncpy((char *)dest, br_dev ? br_dev->name : "", IFNAMSIZ);
68 return; 64 return;
69out:
70 return nft_meta_get_eval(expr, regs, pkt);
71err: 65err:
72 regs->verdict.code = NFT_BREAK; 66 regs->verdict.code = NFT_BREAK;
73} 67}
diff --git a/net/can/gw.c b/net/can/gw.c
index 5275ddf580bc..72711053ebe6 100644
--- a/net/can/gw.c
+++ b/net/can/gw.c
@@ -1046,32 +1046,50 @@ static __init int cgw_module_init(void)
1046 pr_info("can: netlink gateway (rev " CAN_GW_VERSION ") max_hops=%d\n", 1046 pr_info("can: netlink gateway (rev " CAN_GW_VERSION ") max_hops=%d\n",
1047 max_hops); 1047 max_hops);
1048 1048
1049 register_pernet_subsys(&cangw_pernet_ops); 1049 ret = register_pernet_subsys(&cangw_pernet_ops);
1050 if (ret)
1051 return ret;
1052
1053 ret = -ENOMEM;
1050 cgw_cache = kmem_cache_create("can_gw", sizeof(struct cgw_job), 1054 cgw_cache = kmem_cache_create("can_gw", sizeof(struct cgw_job),
1051 0, 0, NULL); 1055 0, 0, NULL);
1052
1053 if (!cgw_cache) 1056 if (!cgw_cache)
1054 return -ENOMEM; 1057 goto out_cache_create;
1055 1058
1056 /* set notifier */ 1059 /* set notifier */
1057 notifier.notifier_call = cgw_notifier; 1060 notifier.notifier_call = cgw_notifier;
1058 register_netdevice_notifier(&notifier); 1061 ret = register_netdevice_notifier(&notifier);
1062 if (ret)
1063 goto out_register_notifier;
1059 1064
1060 ret = rtnl_register_module(THIS_MODULE, PF_CAN, RTM_GETROUTE, 1065 ret = rtnl_register_module(THIS_MODULE, PF_CAN, RTM_GETROUTE,
1061 NULL, cgw_dump_jobs, 0); 1066 NULL, cgw_dump_jobs, 0);
1062 if (ret) { 1067 if (ret)
1063 unregister_netdevice_notifier(&notifier); 1068 goto out_rtnl_register1;
1064 kmem_cache_destroy(cgw_cache); 1069
1065 return -ENOBUFS; 1070 ret = rtnl_register_module(THIS_MODULE, PF_CAN, RTM_NEWROUTE,
1066 } 1071 cgw_create_job, NULL, 0);
1067 1072 if (ret)
1068 /* Only the first call to rtnl_register_module can fail */ 1073 goto out_rtnl_register2;
1069 rtnl_register_module(THIS_MODULE, PF_CAN, RTM_NEWROUTE, 1074 ret = rtnl_register_module(THIS_MODULE, PF_CAN, RTM_DELROUTE,
1070 cgw_create_job, NULL, 0); 1075 cgw_remove_job, NULL, 0);
1071 rtnl_register_module(THIS_MODULE, PF_CAN, RTM_DELROUTE, 1076 if (ret)
1072 cgw_remove_job, NULL, 0); 1077 goto out_rtnl_register3;
1073 1078
1074 return 0; 1079 return 0;
1080
1081out_rtnl_register3:
1082 rtnl_unregister(PF_CAN, RTM_NEWROUTE);
1083out_rtnl_register2:
1084 rtnl_unregister(PF_CAN, RTM_GETROUTE);
1085out_rtnl_register1:
1086 unregister_netdevice_notifier(&notifier);
1087out_register_notifier:
1088 kmem_cache_destroy(cgw_cache);
1089out_cache_create:
1090 unregister_pernet_subsys(&cangw_pernet_ops);
1091
1092 return ret;
1075} 1093}
1076 1094
1077static __exit void cgw_module_exit(void) 1095static __exit void cgw_module_exit(void)
diff --git a/net/ceph/crypto.c b/net/ceph/crypto.c
index 5d6724cee38f..4f75df40fb12 100644
--- a/net/ceph/crypto.c
+++ b/net/ceph/crypto.c
@@ -136,8 +136,10 @@ void ceph_crypto_key_destroy(struct ceph_crypto_key *key)
136 if (key) { 136 if (key) {
137 kfree(key->key); 137 kfree(key->key);
138 key->key = NULL; 138 key->key = NULL;
139 crypto_free_sync_skcipher(key->tfm); 139 if (key->tfm) {
140 key->tfm = NULL; 140 crypto_free_sync_skcipher(key->tfm);
141 key->tfm = NULL;
142 }
141 } 143 }
142} 144}
143 145
diff --git a/net/ceph/osd_client.c b/net/ceph/osd_client.c
index 0b2df09b2554..78ae6e8c953d 100644
--- a/net/ceph/osd_client.c
+++ b/net/ceph/osd_client.c
@@ -1496,7 +1496,7 @@ static enum calc_target_result calc_target(struct ceph_osd_client *osdc,
1496 struct ceph_osds up, acting; 1496 struct ceph_osds up, acting;
1497 bool force_resend = false; 1497 bool force_resend = false;
1498 bool unpaused = false; 1498 bool unpaused = false;
1499 bool legacy_change; 1499 bool legacy_change = false;
1500 bool split = false; 1500 bool split = false;
1501 bool sort_bitwise = ceph_osdmap_flag(osdc, CEPH_OSDMAP_SORTBITWISE); 1501 bool sort_bitwise = ceph_osdmap_flag(osdc, CEPH_OSDMAP_SORTBITWISE);
1502 bool recovery_deletes = ceph_osdmap_flag(osdc, 1502 bool recovery_deletes = ceph_osdmap_flag(osdc,
@@ -1584,15 +1584,14 @@ static enum calc_target_result calc_target(struct ceph_osd_client *osdc,
1584 t->osd = acting.primary; 1584 t->osd = acting.primary;
1585 } 1585 }
1586 1586
1587 if (unpaused || legacy_change || force_resend || 1587 if (unpaused || legacy_change || force_resend || split)
1588 (split && con && CEPH_HAVE_FEATURE(con->peer_features,
1589 RESEND_ON_SPLIT)))
1590 ct_res = CALC_TARGET_NEED_RESEND; 1588 ct_res = CALC_TARGET_NEED_RESEND;
1591 else 1589 else
1592 ct_res = CALC_TARGET_NO_ACTION; 1590 ct_res = CALC_TARGET_NO_ACTION;
1593 1591
1594out: 1592out:
1595 dout("%s t %p -> ct_res %d osd %d\n", __func__, t, ct_res, t->osd); 1593 dout("%s t %p -> %d%d%d%d ct_res %d osd%d\n", __func__, t, unpaused,
1594 legacy_change, force_resend, split, ct_res, t->osd);
1596 return ct_res; 1595 return ct_res;
1597} 1596}
1598 1597
diff --git a/net/core/dev.c b/net/core/dev.c
index fc676b2610e3..0891f499c1bb 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -4374,12 +4374,17 @@ static u32 netif_receive_generic_xdp(struct sk_buff *skb,
4374 4374
4375 act = bpf_prog_run_xdp(xdp_prog, xdp); 4375 act = bpf_prog_run_xdp(xdp_prog, xdp);
4376 4376
4377 /* check if bpf_xdp_adjust_head was used */
4377 off = xdp->data - orig_data; 4378 off = xdp->data - orig_data;
4378 if (off > 0) 4379 if (off) {
4379 __skb_pull(skb, off); 4380 if (off > 0)
4380 else if (off < 0) 4381 __skb_pull(skb, off);
4381 __skb_push(skb, -off); 4382 else if (off < 0)
4382 skb->mac_header += off; 4383 __skb_push(skb, -off);
4384
4385 skb->mac_header += off;
4386 skb_reset_network_header(skb);
4387 }
4383 4388
4384 /* check if bpf_xdp_adjust_tail was used. it can only "shrink" 4389 /* check if bpf_xdp_adjust_tail was used. it can only "shrink"
4385 * pckt. 4390 * pckt.
@@ -9701,6 +9706,8 @@ static void __net_exit default_device_exit(struct net *net)
9701 9706
9702 /* Push remaining network devices to init_net */ 9707 /* Push remaining network devices to init_net */
9703 snprintf(fb_name, IFNAMSIZ, "dev%d", dev->ifindex); 9708 snprintf(fb_name, IFNAMSIZ, "dev%d", dev->ifindex);
9709 if (__dev_get_by_name(&init_net, fb_name))
9710 snprintf(fb_name, IFNAMSIZ, "dev%%d");
9704 err = dev_change_net_namespace(dev, &init_net, fb_name); 9711 err = dev_change_net_namespace(dev, &init_net, fb_name);
9705 if (err) { 9712 if (err) {
9706 pr_emerg("%s: failed to move %s to init_net: %d\n", 9713 pr_emerg("%s: failed to move %s to init_net: %d\n",
diff --git a/net/core/filter.c b/net/core/filter.c
index 4e2a79b2fd77..4c6a252d4212 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -7455,12 +7455,12 @@ static u32 bpf_convert_ctx_access(enum bpf_access_type type,
7455 case offsetof(struct __sk_buff, gso_segs): 7455 case offsetof(struct __sk_buff, gso_segs):
7456 /* si->dst_reg = skb_shinfo(SKB); */ 7456 /* si->dst_reg = skb_shinfo(SKB); */
7457#ifdef NET_SKBUFF_DATA_USES_OFFSET 7457#ifdef NET_SKBUFF_DATA_USES_OFFSET
7458 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, head),
7459 si->dst_reg, si->src_reg,
7460 offsetof(struct sk_buff, head));
7461 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, end), 7458 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, end),
7462 BPF_REG_AX, si->src_reg, 7459 BPF_REG_AX, si->src_reg,
7463 offsetof(struct sk_buff, end)); 7460 offsetof(struct sk_buff, end));
7461 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, head),
7462 si->dst_reg, si->src_reg,
7463 offsetof(struct sk_buff, head));
7464 *insn++ = BPF_ALU64_REG(BPF_ADD, si->dst_reg, BPF_REG_AX); 7464 *insn++ = BPF_ALU64_REG(BPF_ADD, si->dst_reg, BPF_REG_AX);
7465#else 7465#else
7466 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, end), 7466 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, end),
@@ -8757,13 +8757,13 @@ sk_reuseport_is_valid_access(int off, int size,
8757 return size == size_default; 8757 return size == size_default;
8758 8758
8759 /* Fields that allow narrowing */ 8759 /* Fields that allow narrowing */
8760 case offsetof(struct sk_reuseport_md, eth_protocol): 8760 case bpf_ctx_range(struct sk_reuseport_md, eth_protocol):
8761 if (size < FIELD_SIZEOF(struct sk_buff, protocol)) 8761 if (size < FIELD_SIZEOF(struct sk_buff, protocol))
8762 return false; 8762 return false;
8763 /* fall through */ 8763 /* fall through */
8764 case offsetof(struct sk_reuseport_md, ip_protocol): 8764 case bpf_ctx_range(struct sk_reuseport_md, ip_protocol):
8765 case offsetof(struct sk_reuseport_md, bind_inany): 8765 case bpf_ctx_range(struct sk_reuseport_md, bind_inany):
8766 case offsetof(struct sk_reuseport_md, len): 8766 case bpf_ctx_range(struct sk_reuseport_md, len):
8767 bpf_ctx_record_field_size(info, size_default); 8767 bpf_ctx_record_field_size(info, size_default);
8768 return bpf_ctx_narrow_access_ok(off, size, size_default); 8768 return bpf_ctx_narrow_access_ok(off, size, size_default);
8769 8769
diff --git a/net/core/flow_dissector.c b/net/core/flow_dissector.c
index 3e6fedb57bc1..2470b4b404e6 100644
--- a/net/core/flow_dissector.c
+++ b/net/core/flow_dissector.c
@@ -142,8 +142,8 @@ int skb_flow_dissector_bpf_prog_detach(const union bpf_attr *attr)
142 mutex_unlock(&flow_dissector_mutex); 142 mutex_unlock(&flow_dissector_mutex);
143 return -ENOENT; 143 return -ENOENT;
144 } 144 }
145 bpf_prog_put(attached);
146 RCU_INIT_POINTER(net->flow_dissector_prog, NULL); 145 RCU_INIT_POINTER(net->flow_dissector_prog, NULL);
146 bpf_prog_put(attached);
147 mutex_unlock(&flow_dissector_mutex); 147 mutex_unlock(&flow_dissector_mutex);
148 return 0; 148 return 0;
149} 149}
diff --git a/net/core/flow_offload.c b/net/core/flow_offload.c
index 76f8db3841d7..d63b970784dc 100644
--- a/net/core/flow_offload.c
+++ b/net/core/flow_offload.c
@@ -165,7 +165,7 @@ void flow_rule_match_enc_opts(const struct flow_rule *rule,
165} 165}
166EXPORT_SYMBOL(flow_rule_match_enc_opts); 166EXPORT_SYMBOL(flow_rule_match_enc_opts);
167 167
168struct flow_block_cb *flow_block_cb_alloc(struct net *net, tc_setup_cb_t *cb, 168struct flow_block_cb *flow_block_cb_alloc(flow_setup_cb_t *cb,
169 void *cb_ident, void *cb_priv, 169 void *cb_ident, void *cb_priv,
170 void (*release)(void *cb_priv)) 170 void (*release)(void *cb_priv))
171{ 171{
@@ -175,7 +175,6 @@ struct flow_block_cb *flow_block_cb_alloc(struct net *net, tc_setup_cb_t *cb,
175 if (!block_cb) 175 if (!block_cb)
176 return ERR_PTR(-ENOMEM); 176 return ERR_PTR(-ENOMEM);
177 177
178 block_cb->net = net;
179 block_cb->cb = cb; 178 block_cb->cb = cb;
180 block_cb->cb_ident = cb_ident; 179 block_cb->cb_ident = cb_ident;
181 block_cb->cb_priv = cb_priv; 180 block_cb->cb_priv = cb_priv;
@@ -194,14 +193,13 @@ void flow_block_cb_free(struct flow_block_cb *block_cb)
194} 193}
195EXPORT_SYMBOL(flow_block_cb_free); 194EXPORT_SYMBOL(flow_block_cb_free);
196 195
197struct flow_block_cb *flow_block_cb_lookup(struct flow_block_offload *f, 196struct flow_block_cb *flow_block_cb_lookup(struct flow_block *block,
198 tc_setup_cb_t *cb, void *cb_ident) 197 flow_setup_cb_t *cb, void *cb_ident)
199{ 198{
200 struct flow_block_cb *block_cb; 199 struct flow_block_cb *block_cb;
201 200
202 list_for_each_entry(block_cb, f->driver_block_list, driver_list) { 201 list_for_each_entry(block_cb, &block->cb_list, list) {
203 if (block_cb->net == f->net && 202 if (block_cb->cb == cb &&
204 block_cb->cb == cb &&
205 block_cb->cb_ident == cb_ident) 203 block_cb->cb_ident == cb_ident)
206 return block_cb; 204 return block_cb;
207 } 205 }
@@ -228,7 +226,7 @@ unsigned int flow_block_cb_decref(struct flow_block_cb *block_cb)
228} 226}
229EXPORT_SYMBOL(flow_block_cb_decref); 227EXPORT_SYMBOL(flow_block_cb_decref);
230 228
231bool flow_block_cb_is_busy(tc_setup_cb_t *cb, void *cb_ident, 229bool flow_block_cb_is_busy(flow_setup_cb_t *cb, void *cb_ident,
232 struct list_head *driver_block_list) 230 struct list_head *driver_block_list)
233{ 231{
234 struct flow_block_cb *block_cb; 232 struct flow_block_cb *block_cb;
@@ -245,7 +243,8 @@ EXPORT_SYMBOL(flow_block_cb_is_busy);
245 243
246int flow_block_cb_setup_simple(struct flow_block_offload *f, 244int flow_block_cb_setup_simple(struct flow_block_offload *f,
247 struct list_head *driver_block_list, 245 struct list_head *driver_block_list,
248 tc_setup_cb_t *cb, void *cb_ident, void *cb_priv, 246 flow_setup_cb_t *cb,
247 void *cb_ident, void *cb_priv,
249 bool ingress_only) 248 bool ingress_only)
250{ 249{
251 struct flow_block_cb *block_cb; 250 struct flow_block_cb *block_cb;
@@ -261,8 +260,7 @@ int flow_block_cb_setup_simple(struct flow_block_offload *f,
261 if (flow_block_cb_is_busy(cb, cb_ident, driver_block_list)) 260 if (flow_block_cb_is_busy(cb, cb_ident, driver_block_list))
262 return -EBUSY; 261 return -EBUSY;
263 262
264 block_cb = flow_block_cb_alloc(f->net, cb, cb_ident, 263 block_cb = flow_block_cb_alloc(cb, cb_ident, cb_priv, NULL);
265 cb_priv, NULL);
266 if (IS_ERR(block_cb)) 264 if (IS_ERR(block_cb))
267 return PTR_ERR(block_cb); 265 return PTR_ERR(block_cb);
268 266
@@ -270,7 +268,7 @@ int flow_block_cb_setup_simple(struct flow_block_offload *f,
270 list_add_tail(&block_cb->driver_list, driver_block_list); 268 list_add_tail(&block_cb->driver_list, driver_block_list);
271 return 0; 269 return 0;
272 case FLOW_BLOCK_UNBIND: 270 case FLOW_BLOCK_UNBIND:
273 block_cb = flow_block_cb_lookup(f, cb, cb_ident); 271 block_cb = flow_block_cb_lookup(f->block, cb, cb_ident);
274 if (!block_cb) 272 if (!block_cb)
275 return -ENOENT; 273 return -ENOENT;
276 274
diff --git a/net/core/netpoll.c b/net/core/netpoll.c
index 2cf27da1baeb..849380a622ef 100644
--- a/net/core/netpoll.c
+++ b/net/core/netpoll.c
@@ -122,7 +122,7 @@ static void queue_process(struct work_struct *work)
122 txq = netdev_get_tx_queue(dev, q_index); 122 txq = netdev_get_tx_queue(dev, q_index);
123 HARD_TX_LOCK(dev, txq, smp_processor_id()); 123 HARD_TX_LOCK(dev, txq, smp_processor_id());
124 if (netif_xmit_frozen_or_stopped(txq) || 124 if (netif_xmit_frozen_or_stopped(txq) ||
125 netpoll_start_xmit(skb, dev, txq) != NETDEV_TX_OK) { 125 !dev_xmit_complete(netpoll_start_xmit(skb, dev, txq))) {
126 skb_queue_head(&npinfo->txq, skb); 126 skb_queue_head(&npinfo->txq, skb);
127 HARD_TX_UNLOCK(dev, txq); 127 HARD_TX_UNLOCK(dev, txq);
128 local_irq_restore(flags); 128 local_irq_restore(flags);
@@ -335,7 +335,7 @@ void netpoll_send_skb_on_dev(struct netpoll *np, struct sk_buff *skb,
335 335
336 HARD_TX_UNLOCK(dev, txq); 336 HARD_TX_UNLOCK(dev, txq);
337 337
338 if (status == NETDEV_TX_OK) 338 if (dev_xmit_complete(status))
339 break; 339 break;
340 340
341 } 341 }
@@ -352,7 +352,7 @@ void netpoll_send_skb_on_dev(struct netpoll *np, struct sk_buff *skb,
352 352
353 } 353 }
354 354
355 if (status != NETDEV_TX_OK) { 355 if (!dev_xmit_complete(status)) {
356 skb_queue_tail(&npinfo->txq, skb); 356 skb_queue_tail(&npinfo->txq, skb);
357 schedule_delayed_work(&npinfo->tx_work,0); 357 schedule_delayed_work(&npinfo->tx_work,0);
358 } 358 }
diff --git a/net/core/skmsg.c b/net/core/skmsg.c
index 93bffaad2135..6832eeb4b785 100644
--- a/net/core/skmsg.c
+++ b/net/core/skmsg.c
@@ -585,12 +585,12 @@ EXPORT_SYMBOL_GPL(sk_psock_destroy);
585 585
586void sk_psock_drop(struct sock *sk, struct sk_psock *psock) 586void sk_psock_drop(struct sock *sk, struct sk_psock *psock)
587{ 587{
588 rcu_assign_sk_user_data(sk, NULL);
589 sk_psock_cork_free(psock); 588 sk_psock_cork_free(psock);
590 sk_psock_zap_ingress(psock); 589 sk_psock_zap_ingress(psock);
591 sk_psock_restore_proto(sk, psock);
592 590
593 write_lock_bh(&sk->sk_callback_lock); 591 write_lock_bh(&sk->sk_callback_lock);
592 sk_psock_restore_proto(sk, psock);
593 rcu_assign_sk_user_data(sk, NULL);
594 if (psock->progs.skb_parser) 594 if (psock->progs.skb_parser)
595 sk_psock_stop_strp(sk, psock); 595 sk_psock_stop_strp(sk, psock);
596 write_unlock_bh(&sk->sk_callback_lock); 596 write_unlock_bh(&sk->sk_callback_lock);
diff --git a/net/core/sock.c b/net/core/sock.c
index d57b0cc995a0..545fac19a711 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -1992,6 +1992,19 @@ void skb_set_owner_w(struct sk_buff *skb, struct sock *sk)
1992} 1992}
1993EXPORT_SYMBOL(skb_set_owner_w); 1993EXPORT_SYMBOL(skb_set_owner_w);
1994 1994
1995static bool can_skb_orphan_partial(const struct sk_buff *skb)
1996{
1997#ifdef CONFIG_TLS_DEVICE
1998 /* Drivers depend on in-order delivery for crypto offload,
1999 * partial orphan breaks out-of-order-OK logic.
2000 */
2001 if (skb->decrypted)
2002 return false;
2003#endif
2004 return (skb->destructor == sock_wfree ||
2005 (IS_ENABLED(CONFIG_INET) && skb->destructor == tcp_wfree));
2006}
2007
1995/* This helper is used by netem, as it can hold packets in its 2008/* This helper is used by netem, as it can hold packets in its
1996 * delay queue. We want to allow the owner socket to send more 2009 * delay queue. We want to allow the owner socket to send more
1997 * packets, as if they were already TX completed by a typical driver. 2010 * packets, as if they were already TX completed by a typical driver.
@@ -2003,11 +2016,7 @@ void skb_orphan_partial(struct sk_buff *skb)
2003 if (skb_is_tcp_pure_ack(skb)) 2016 if (skb_is_tcp_pure_ack(skb))
2004 return; 2017 return;
2005 2018
2006 if (skb->destructor == sock_wfree 2019 if (can_skb_orphan_partial(skb)) {
2007#ifdef CONFIG_INET
2008 || skb->destructor == tcp_wfree
2009#endif
2010 ) {
2011 struct sock *sk = skb->sk; 2020 struct sock *sk = skb->sk;
2012 2021
2013 if (refcount_inc_not_zero(&sk->sk_refcnt)) { 2022 if (refcount_inc_not_zero(&sk->sk_refcnt)) {
@@ -3278,16 +3287,17 @@ static __init int net_inuse_init(void)
3278 3287
3279core_initcall(net_inuse_init); 3288core_initcall(net_inuse_init);
3280 3289
3281static void assign_proto_idx(struct proto *prot) 3290static int assign_proto_idx(struct proto *prot)
3282{ 3291{
3283 prot->inuse_idx = find_first_zero_bit(proto_inuse_idx, PROTO_INUSE_NR); 3292 prot->inuse_idx = find_first_zero_bit(proto_inuse_idx, PROTO_INUSE_NR);
3284 3293
3285 if (unlikely(prot->inuse_idx == PROTO_INUSE_NR - 1)) { 3294 if (unlikely(prot->inuse_idx == PROTO_INUSE_NR - 1)) {
3286 pr_err("PROTO_INUSE_NR exhausted\n"); 3295 pr_err("PROTO_INUSE_NR exhausted\n");
3287 return; 3296 return -ENOSPC;
3288 } 3297 }
3289 3298
3290 set_bit(prot->inuse_idx, proto_inuse_idx); 3299 set_bit(prot->inuse_idx, proto_inuse_idx);
3300 return 0;
3291} 3301}
3292 3302
3293static void release_proto_idx(struct proto *prot) 3303static void release_proto_idx(struct proto *prot)
@@ -3296,8 +3306,9 @@ static void release_proto_idx(struct proto *prot)
3296 clear_bit(prot->inuse_idx, proto_inuse_idx); 3306 clear_bit(prot->inuse_idx, proto_inuse_idx);
3297} 3307}
3298#else 3308#else
3299static inline void assign_proto_idx(struct proto *prot) 3309static inline int assign_proto_idx(struct proto *prot)
3300{ 3310{
3311 return 0;
3301} 3312}
3302 3313
3303static inline void release_proto_idx(struct proto *prot) 3314static inline void release_proto_idx(struct proto *prot)
@@ -3346,6 +3357,8 @@ static int req_prot_init(const struct proto *prot)
3346 3357
3347int proto_register(struct proto *prot, int alloc_slab) 3358int proto_register(struct proto *prot, int alloc_slab)
3348{ 3359{
3360 int ret = -ENOBUFS;
3361
3349 if (alloc_slab) { 3362 if (alloc_slab) {
3350 prot->slab = kmem_cache_create_usercopy(prot->name, 3363 prot->slab = kmem_cache_create_usercopy(prot->name,
3351 prot->obj_size, 0, 3364 prot->obj_size, 0,
@@ -3382,20 +3395,27 @@ int proto_register(struct proto *prot, int alloc_slab)
3382 } 3395 }
3383 3396
3384 mutex_lock(&proto_list_mutex); 3397 mutex_lock(&proto_list_mutex);
3398 ret = assign_proto_idx(prot);
3399 if (ret) {
3400 mutex_unlock(&proto_list_mutex);
3401 goto out_free_timewait_sock_slab_name;
3402 }
3385 list_add(&prot->node, &proto_list); 3403 list_add(&prot->node, &proto_list);
3386 assign_proto_idx(prot);
3387 mutex_unlock(&proto_list_mutex); 3404 mutex_unlock(&proto_list_mutex);
3388 return 0; 3405 return ret;
3389 3406
3390out_free_timewait_sock_slab_name: 3407out_free_timewait_sock_slab_name:
3391 kfree(prot->twsk_prot->twsk_slab_name); 3408 if (alloc_slab && prot->twsk_prot)
3409 kfree(prot->twsk_prot->twsk_slab_name);
3392out_free_request_sock_slab: 3410out_free_request_sock_slab:
3393 req_prot_cleanup(prot->rsk_prot); 3411 if (alloc_slab) {
3412 req_prot_cleanup(prot->rsk_prot);
3394 3413
3395 kmem_cache_destroy(prot->slab); 3414 kmem_cache_destroy(prot->slab);
3396 prot->slab = NULL; 3415 prot->slab = NULL;
3416 }
3397out: 3417out:
3398 return -ENOBUFS; 3418 return ret;
3399} 3419}
3400EXPORT_SYMBOL(proto_register); 3420EXPORT_SYMBOL(proto_register);
3401 3421
diff --git a/net/core/sock_diag.c b/net/core/sock_diag.c
index 3312a5849a97..c13ffbd33d8d 100644
--- a/net/core/sock_diag.c
+++ b/net/core/sock_diag.c
@@ -19,6 +19,7 @@ static const struct sock_diag_handler *sock_diag_handlers[AF_MAX];
19static int (*inet_rcv_compat)(struct sk_buff *skb, struct nlmsghdr *nlh); 19static int (*inet_rcv_compat)(struct sk_buff *skb, struct nlmsghdr *nlh);
20static DEFINE_MUTEX(sock_diag_table_mutex); 20static DEFINE_MUTEX(sock_diag_table_mutex);
21static struct workqueue_struct *broadcast_wq; 21static struct workqueue_struct *broadcast_wq;
22static atomic64_t cookie_gen;
22 23
23u64 sock_gen_cookie(struct sock *sk) 24u64 sock_gen_cookie(struct sock *sk)
24{ 25{
@@ -27,7 +28,7 @@ u64 sock_gen_cookie(struct sock *sk)
27 28
28 if (res) 29 if (res)
29 return res; 30 return res;
30 res = atomic64_inc_return(&sock_net(sk)->cookie_gen); 31 res = atomic64_inc_return(&cookie_gen);
31 atomic64_cmpxchg(&sk->sk_cookie, 0, res); 32 atomic64_cmpxchg(&sk->sk_cookie, 0, res);
32 } 33 }
33} 34}
diff --git a/net/core/sock_map.c b/net/core/sock_map.c
index 52d4faeee18b..1330a7442e5b 100644
--- a/net/core/sock_map.c
+++ b/net/core/sock_map.c
@@ -247,6 +247,8 @@ static void sock_map_free(struct bpf_map *map)
247 raw_spin_unlock_bh(&stab->lock); 247 raw_spin_unlock_bh(&stab->lock);
248 rcu_read_unlock(); 248 rcu_read_unlock();
249 249
250 synchronize_rcu();
251
250 bpf_map_area_free(stab->sks); 252 bpf_map_area_free(stab->sks);
251 kfree(stab); 253 kfree(stab);
252} 254}
@@ -276,16 +278,20 @@ static int __sock_map_delete(struct bpf_stab *stab, struct sock *sk_test,
276 struct sock **psk) 278 struct sock **psk)
277{ 279{
278 struct sock *sk; 280 struct sock *sk;
281 int err = 0;
279 282
280 raw_spin_lock_bh(&stab->lock); 283 raw_spin_lock_bh(&stab->lock);
281 sk = *psk; 284 sk = *psk;
282 if (!sk_test || sk_test == sk) 285 if (!sk_test || sk_test == sk)
283 *psk = NULL; 286 sk = xchg(psk, NULL);
287
288 if (likely(sk))
289 sock_map_unref(sk, psk);
290 else
291 err = -EINVAL;
292
284 raw_spin_unlock_bh(&stab->lock); 293 raw_spin_unlock_bh(&stab->lock);
285 if (unlikely(!sk)) 294 return err;
286 return -EINVAL;
287 sock_map_unref(sk, psk);
288 return 0;
289} 295}
290 296
291static void sock_map_delete_from_link(struct bpf_map *map, struct sock *sk, 297static void sock_map_delete_from_link(struct bpf_map *map, struct sock *sk,
@@ -328,6 +334,7 @@ static int sock_map_update_common(struct bpf_map *map, u32 idx,
328 struct sock *sk, u64 flags) 334 struct sock *sk, u64 flags)
329{ 335{
330 struct bpf_stab *stab = container_of(map, struct bpf_stab, map); 336 struct bpf_stab *stab = container_of(map, struct bpf_stab, map);
337 struct inet_connection_sock *icsk = inet_csk(sk);
331 struct sk_psock_link *link; 338 struct sk_psock_link *link;
332 struct sk_psock *psock; 339 struct sk_psock *psock;
333 struct sock *osk; 340 struct sock *osk;
@@ -338,6 +345,8 @@ static int sock_map_update_common(struct bpf_map *map, u32 idx,
338 return -EINVAL; 345 return -EINVAL;
339 if (unlikely(idx >= map->max_entries)) 346 if (unlikely(idx >= map->max_entries))
340 return -E2BIG; 347 return -E2BIG;
348 if (unlikely(icsk->icsk_ulp_data))
349 return -EINVAL;
341 350
342 link = sk_psock_init_link(); 351 link = sk_psock_init_link();
343 if (!link) 352 if (!link)
diff --git a/net/core/stream.c b/net/core/stream.c
index e94bb02a5629..4f1d4aa5fb38 100644
--- a/net/core/stream.c
+++ b/net/core/stream.c
@@ -120,7 +120,6 @@ int sk_stream_wait_memory(struct sock *sk, long *timeo_p)
120 int err = 0; 120 int err = 0;
121 long vm_wait = 0; 121 long vm_wait = 0;
122 long current_timeo = *timeo_p; 122 long current_timeo = *timeo_p;
123 bool noblock = (*timeo_p ? false : true);
124 DEFINE_WAIT_FUNC(wait, woken_wake_function); 123 DEFINE_WAIT_FUNC(wait, woken_wake_function);
125 124
126 if (sk_stream_memory_free(sk)) 125 if (sk_stream_memory_free(sk))
@@ -133,11 +132,8 @@ int sk_stream_wait_memory(struct sock *sk, long *timeo_p)
133 132
134 if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN)) 133 if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN))
135 goto do_error; 134 goto do_error;
136 if (!*timeo_p) { 135 if (!*timeo_p)
137 if (noblock) 136 goto do_eagain;
138 set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
139 goto do_nonblock;
140 }
141 if (signal_pending(current)) 137 if (signal_pending(current))
142 goto do_interrupted; 138 goto do_interrupted;
143 sk_clear_bit(SOCKWQ_ASYNC_NOSPACE, sk); 139 sk_clear_bit(SOCKWQ_ASYNC_NOSPACE, sk);
@@ -169,7 +165,13 @@ out:
169do_error: 165do_error:
170 err = -EPIPE; 166 err = -EPIPE;
171 goto out; 167 goto out;
172do_nonblock: 168do_eagain:
169 /* Make sure that whenever EAGAIN is returned, EPOLLOUT event can
170 * be generated later.
171 * When TCP receives ACK packets that make room, tcp_check_space()
172 * only calls tcp_new_space() if SOCK_NOSPACE is set.
173 */
174 set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
173 err = -EAGAIN; 175 err = -EAGAIN;
174 goto out; 176 goto out;
175do_interrupted: 177do_interrupted:
diff --git a/net/dsa/slave.c b/net/dsa/slave.c
index 614c38ece104..33f41178afcc 100644
--- a/net/dsa/slave.c
+++ b/net/dsa/slave.c
@@ -951,7 +951,7 @@ static int dsa_slave_setup_tc_block(struct net_device *dev,
951 struct flow_block_offload *f) 951 struct flow_block_offload *f)
952{ 952{
953 struct flow_block_cb *block_cb; 953 struct flow_block_cb *block_cb;
954 tc_setup_cb_t *cb; 954 flow_setup_cb_t *cb;
955 955
956 if (f->binder_type == FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS) 956 if (f->binder_type == FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
957 cb = dsa_slave_setup_tc_block_cb_ig; 957 cb = dsa_slave_setup_tc_block_cb_ig;
@@ -967,7 +967,7 @@ static int dsa_slave_setup_tc_block(struct net_device *dev,
967 if (flow_block_cb_is_busy(cb, dev, &dsa_slave_block_cb_list)) 967 if (flow_block_cb_is_busy(cb, dev, &dsa_slave_block_cb_list))
968 return -EBUSY; 968 return -EBUSY;
969 969
970 block_cb = flow_block_cb_alloc(f->net, cb, dev, dev, NULL); 970 block_cb = flow_block_cb_alloc(cb, dev, dev, NULL);
971 if (IS_ERR(block_cb)) 971 if (IS_ERR(block_cb))
972 return PTR_ERR(block_cb); 972 return PTR_ERR(block_cb);
973 973
@@ -975,7 +975,7 @@ static int dsa_slave_setup_tc_block(struct net_device *dev,
975 list_add_tail(&block_cb->driver_list, &dsa_slave_block_cb_list); 975 list_add_tail(&block_cb->driver_list, &dsa_slave_block_cb_list);
976 return 0; 976 return 0;
977 case FLOW_BLOCK_UNBIND: 977 case FLOW_BLOCK_UNBIND:
978 block_cb = flow_block_cb_lookup(f, cb, dev); 978 block_cb = flow_block_cb_lookup(f->block, cb, dev);
979 if (!block_cb) 979 if (!block_cb)
980 return -ENOENT; 980 return -ENOENT;
981 981
diff --git a/net/dsa/switch.c b/net/dsa/switch.c
index 4ec5b7f85d51..09d9286b27cc 100644
--- a/net/dsa/switch.c
+++ b/net/dsa/switch.c
@@ -153,6 +153,9 @@ static void dsa_switch_mdb_add_bitmap(struct dsa_switch *ds,
153{ 153{
154 int port; 154 int port;
155 155
156 if (!ds->ops->port_mdb_add)
157 return;
158
156 for_each_set_bit(port, bitmap, ds->num_ports) 159 for_each_set_bit(port, bitmap, ds->num_ports)
157 ds->ops->port_mdb_add(ds, port, mdb); 160 ds->ops->port_mdb_add(ds, port, mdb);
158} 161}
diff --git a/net/dsa/tag_8021q.c b/net/dsa/tag_8021q.c
index 6ebbd799c4eb..67a1bc635a7b 100644
--- a/net/dsa/tag_8021q.c
+++ b/net/dsa/tag_8021q.c
@@ -28,6 +28,7 @@
28 * 28 *
29 * RSV - VID[9]: 29 * RSV - VID[9]:
30 * To be used for further expansion of SWITCH_ID or for other purposes. 30 * To be used for further expansion of SWITCH_ID or for other purposes.
31 * Must be transmitted as zero and ignored on receive.
31 * 32 *
32 * SWITCH_ID - VID[8:6]: 33 * SWITCH_ID - VID[8:6]:
33 * Index of switch within DSA tree. Must be between 0 and 34 * Index of switch within DSA tree. Must be between 0 and
@@ -35,6 +36,7 @@
35 * 36 *
36 * RSV - VID[5:4]: 37 * RSV - VID[5:4]:
37 * To be used for further expansion of PORT or for other purposes. 38 * To be used for further expansion of PORT or for other purposes.
39 * Must be transmitted as zero and ignored on receive.
38 * 40 *
39 * PORT - VID[3:0]: 41 * PORT - VID[3:0]:
40 * Index of switch port. Must be between 0 and DSA_MAX_PORTS - 1. 42 * Index of switch port. Must be between 0 and DSA_MAX_PORTS - 1.
diff --git a/net/dsa/tag_sja1105.c b/net/dsa/tag_sja1105.c
index 26363d72d25b..47ee88163a9d 100644
--- a/net/dsa/tag_sja1105.c
+++ b/net/dsa/tag_sja1105.c
@@ -165,6 +165,7 @@ static struct sk_buff
165 "Expected meta frame, is %12llx " 165 "Expected meta frame, is %12llx "
166 "in the DSA master multicast filter?\n", 166 "in the DSA master multicast filter?\n",
167 SJA1105_META_DMAC); 167 SJA1105_META_DMAC);
168 kfree_skb(sp->data->stampable_skb);
168 } 169 }
169 170
170 /* Hold a reference to avoid dsa_switch_rcv 171 /* Hold a reference to avoid dsa_switch_rcv
@@ -211,17 +212,8 @@ static struct sk_buff
211 * for further processing up the network stack. 212 * for further processing up the network stack.
212 */ 213 */
213 kfree_skb(skb); 214 kfree_skb(skb);
214 215 skb = stampable_skb;
215 skb = skb_copy(stampable_skb, GFP_ATOMIC);
216 if (!skb) {
217 dev_err_ratelimited(dp->ds->dev,
218 "Failed to copy stampable skb\n");
219 spin_unlock(&sp->data->meta_lock);
220 return NULL;
221 }
222 sja1105_transfer_meta(skb, meta); 216 sja1105_transfer_meta(skb, meta);
223 /* The cached copy will be freed now */
224 skb_unref(stampable_skb);
225 217
226 spin_unlock(&sp->data->meta_lock); 218 spin_unlock(&sp->data->meta_lock);
227 } 219 }
diff --git a/net/ieee802154/6lowpan/reassembly.c b/net/ieee802154/6lowpan/reassembly.c
index e4aba5d485be..bbe9b3b2d395 100644
--- a/net/ieee802154/6lowpan/reassembly.c
+++ b/net/ieee802154/6lowpan/reassembly.c
@@ -170,7 +170,7 @@ static int lowpan_frag_reasm(struct lowpan_frag_queue *fq, struct sk_buff *skb,
170 reasm_data = inet_frag_reasm_prepare(&fq->q, skb, prev_tail); 170 reasm_data = inet_frag_reasm_prepare(&fq->q, skb, prev_tail);
171 if (!reasm_data) 171 if (!reasm_data)
172 goto out_oom; 172 goto out_oom;
173 inet_frag_reasm_finish(&fq->q, skb, reasm_data); 173 inet_frag_reasm_finish(&fq->q, skb, reasm_data, false);
174 174
175 skb->dev = ldev; 175 skb->dev = ldev;
176 skb->tstamp = fq->q.stamp; 176 skb->tstamp = fq->q.stamp;
diff --git a/net/ieee802154/socket.c b/net/ieee802154/socket.c
index dacbd58e1799..badc5cfe4dc6 100644
--- a/net/ieee802154/socket.c
+++ b/net/ieee802154/socket.c
@@ -1092,7 +1092,7 @@ static struct packet_type ieee802154_packet_type = {
1092 1092
1093static int __init af_ieee802154_init(void) 1093static int __init af_ieee802154_init(void)
1094{ 1094{
1095 int rc = -EINVAL; 1095 int rc;
1096 1096
1097 rc = proto_register(&ieee802154_raw_prot, 1); 1097 rc = proto_register(&ieee802154_raw_prot, 1);
1098 if (rc) 1098 if (rc)
diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c
index 2b2b3d291ab0..1ab2fb6bb37d 100644
--- a/net/ipv4/fib_trie.c
+++ b/net/ipv4/fib_trie.c
@@ -2145,7 +2145,7 @@ static int fn_trie_dump_leaf(struct key_vector *l, struct fib_table *tb,
2145 2145
2146 if (filter->dump_exceptions) { 2146 if (filter->dump_exceptions) {
2147 err = fib_dump_info_fnhe(skb, cb, tb->tb_id, fi, 2147 err = fib_dump_info_fnhe(skb, cb, tb->tb_id, fi,
2148 &i_fa, s_fa); 2148 &i_fa, s_fa, flags);
2149 if (err < 0) 2149 if (err < 0)
2150 goto stop; 2150 goto stop;
2151 } 2151 }
diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c
index 1510e951f451..4298aae74e0e 100644
--- a/net/ipv4/icmp.c
+++ b/net/ipv4/icmp.c
@@ -582,7 +582,13 @@ void __icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info,
582 582
583 if (!rt) 583 if (!rt)
584 goto out; 584 goto out;
585 net = dev_net(rt->dst.dev); 585
586 if (rt->dst.dev)
587 net = dev_net(rt->dst.dev);
588 else if (skb_in->dev)
589 net = dev_net(skb_in->dev);
590 else
591 goto out;
586 592
587 /* 593 /*
588 * Find the original header. It is expected to be valid, of course. 594 * Find the original header. It is expected to be valid, of course.
@@ -902,7 +908,7 @@ static bool icmp_redirect(struct sk_buff *skb)
902 return false; 908 return false;
903 } 909 }
904 910
905 icmp_socket_deliver(skb, icmp_hdr(skb)->un.gateway); 911 icmp_socket_deliver(skb, ntohl(icmp_hdr(skb)->un.gateway));
906 return true; 912 return true;
907} 913}
908 914
diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c
index 180f6896b98b..480d0b22db1a 100644
--- a/net/ipv4/igmp.c
+++ b/net/ipv4/igmp.c
@@ -1475,7 +1475,7 @@ EXPORT_SYMBOL(__ip_mc_inc_group);
1475 1475
1476void ip_mc_inc_group(struct in_device *in_dev, __be32 addr) 1476void ip_mc_inc_group(struct in_device *in_dev, __be32 addr)
1477{ 1477{
1478 __ip_mc_inc_group(in_dev, addr, MCAST_EXCLUDE); 1478 __ip_mc_inc_group(in_dev, addr, GFP_KERNEL);
1479} 1479}
1480EXPORT_SYMBOL(ip_mc_inc_group); 1480EXPORT_SYMBOL(ip_mc_inc_group);
1481 1481
@@ -2197,7 +2197,7 @@ static int __ip_mc_join_group(struct sock *sk, struct ip_mreqn *imr,
2197 iml->sflist = NULL; 2197 iml->sflist = NULL;
2198 iml->sfmode = mode; 2198 iml->sfmode = mode;
2199 rcu_assign_pointer(inet->mc_list, iml); 2199 rcu_assign_pointer(inet->mc_list, iml);
2200 __ip_mc_inc_group(in_dev, addr, mode); 2200 ____ip_mc_inc_group(in_dev, addr, mode, GFP_KERNEL);
2201 err = 0; 2201 err = 0;
2202done: 2202done:
2203 return err; 2203 return err;
diff --git a/net/ipv4/inet_fragment.c b/net/ipv4/inet_fragment.c
index d666756be5f1..10d31733297d 100644
--- a/net/ipv4/inet_fragment.c
+++ b/net/ipv4/inet_fragment.c
@@ -331,7 +331,7 @@ struct inet_frag_queue *inet_frag_find(struct fqdir *fqdir, void *key)
331 prev = rhashtable_lookup(&fqdir->rhashtable, key, fqdir->f->rhash_params); 331 prev = rhashtable_lookup(&fqdir->rhashtable, key, fqdir->f->rhash_params);
332 if (!prev) 332 if (!prev)
333 fq = inet_frag_create(fqdir, key, &prev); 333 fq = inet_frag_create(fqdir, key, &prev);
334 if (prev && !IS_ERR(prev)) { 334 if (!IS_ERR_OR_NULL(prev)) {
335 fq = prev; 335 fq = prev;
336 if (!refcount_inc_not_zero(&fq->refcnt)) 336 if (!refcount_inc_not_zero(&fq->refcnt))
337 fq = NULL; 337 fq = NULL;
@@ -475,11 +475,12 @@ void *inet_frag_reasm_prepare(struct inet_frag_queue *q, struct sk_buff *skb,
475EXPORT_SYMBOL(inet_frag_reasm_prepare); 475EXPORT_SYMBOL(inet_frag_reasm_prepare);
476 476
477void inet_frag_reasm_finish(struct inet_frag_queue *q, struct sk_buff *head, 477void inet_frag_reasm_finish(struct inet_frag_queue *q, struct sk_buff *head,
478 void *reasm_data) 478 void *reasm_data, bool try_coalesce)
479{ 479{
480 struct sk_buff **nextp = (struct sk_buff **)reasm_data; 480 struct sk_buff **nextp = (struct sk_buff **)reasm_data;
481 struct rb_node *rbn; 481 struct rb_node *rbn;
482 struct sk_buff *fp; 482 struct sk_buff *fp;
483 int sum_truesize;
483 484
484 skb_push(head, head->data - skb_network_header(head)); 485 skb_push(head, head->data - skb_network_header(head));
485 486
@@ -487,25 +488,41 @@ void inet_frag_reasm_finish(struct inet_frag_queue *q, struct sk_buff *head,
487 fp = FRAG_CB(head)->next_frag; 488 fp = FRAG_CB(head)->next_frag;
488 rbn = rb_next(&head->rbnode); 489 rbn = rb_next(&head->rbnode);
489 rb_erase(&head->rbnode, &q->rb_fragments); 490 rb_erase(&head->rbnode, &q->rb_fragments);
491
492 sum_truesize = head->truesize;
490 while (rbn || fp) { 493 while (rbn || fp) {
491 /* fp points to the next sk_buff in the current run; 494 /* fp points to the next sk_buff in the current run;
492 * rbn points to the next run. 495 * rbn points to the next run.
493 */ 496 */
494 /* Go through the current run. */ 497 /* Go through the current run. */
495 while (fp) { 498 while (fp) {
496 *nextp = fp; 499 struct sk_buff *next_frag = FRAG_CB(fp)->next_frag;
497 nextp = &fp->next; 500 bool stolen;
498 fp->prev = NULL; 501 int delta;
499 memset(&fp->rbnode, 0, sizeof(fp->rbnode)); 502
500 fp->sk = NULL; 503 sum_truesize += fp->truesize;
501 head->data_len += fp->len;
502 head->len += fp->len;
503 if (head->ip_summed != fp->ip_summed) 504 if (head->ip_summed != fp->ip_summed)
504 head->ip_summed = CHECKSUM_NONE; 505 head->ip_summed = CHECKSUM_NONE;
505 else if (head->ip_summed == CHECKSUM_COMPLETE) 506 else if (head->ip_summed == CHECKSUM_COMPLETE)
506 head->csum = csum_add(head->csum, fp->csum); 507 head->csum = csum_add(head->csum, fp->csum);
507 head->truesize += fp->truesize; 508
508 fp = FRAG_CB(fp)->next_frag; 509 if (try_coalesce && skb_try_coalesce(head, fp, &stolen,
510 &delta)) {
511 kfree_skb_partial(fp, stolen);
512 } else {
513 fp->prev = NULL;
514 memset(&fp->rbnode, 0, sizeof(fp->rbnode));
515 fp->sk = NULL;
516
517 head->data_len += fp->len;
518 head->len += fp->len;
519 head->truesize += fp->truesize;
520
521 *nextp = fp;
522 nextp = &fp->next;
523 }
524
525 fp = next_frag;
509 } 526 }
510 /* Move to the next run. */ 527 /* Move to the next run. */
511 if (rbn) { 528 if (rbn) {
@@ -516,7 +533,7 @@ void inet_frag_reasm_finish(struct inet_frag_queue *q, struct sk_buff *head,
516 rbn = rbnext; 533 rbn = rbnext;
517 } 534 }
518 } 535 }
519 sub_frag_mem_limit(q->fqdir, head->truesize); 536 sub_frag_mem_limit(q->fqdir, sum_truesize);
520 537
521 *nextp = NULL; 538 *nextp = NULL;
522 skb_mark_not_on_list(head); 539 skb_mark_not_on_list(head);
diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c
index 4385eb9e781f..cfeb8890f94e 100644
--- a/net/ipv4/ip_fragment.c
+++ b/net/ipv4/ip_fragment.c
@@ -393,6 +393,11 @@ err:
393 return err; 393 return err;
394} 394}
395 395
396static bool ip_frag_coalesce_ok(const struct ipq *qp)
397{
398 return qp->q.key.v4.user == IP_DEFRAG_LOCAL_DELIVER;
399}
400
396/* Build a new IP datagram from all its fragments. */ 401/* Build a new IP datagram from all its fragments. */
397static int ip_frag_reasm(struct ipq *qp, struct sk_buff *skb, 402static int ip_frag_reasm(struct ipq *qp, struct sk_buff *skb,
398 struct sk_buff *prev_tail, struct net_device *dev) 403 struct sk_buff *prev_tail, struct net_device *dev)
@@ -421,7 +426,8 @@ static int ip_frag_reasm(struct ipq *qp, struct sk_buff *skb,
421 if (len > 65535) 426 if (len > 65535)
422 goto out_oversize; 427 goto out_oversize;
423 428
424 inet_frag_reasm_finish(&qp->q, skb, reasm_data); 429 inet_frag_reasm_finish(&qp->q, skb, reasm_data,
430 ip_frag_coalesce_ok(qp));
425 431
426 skb->dev = dev; 432 skb->dev = dev;
427 IPCB(skb)->frag_max_size = max(qp->max_df_size, qp->q.max_size); 433 IPCB(skb)->frag_max_size = max(qp->max_df_size, qp->q.max_size);
diff --git a/net/ipv4/ipip.c b/net/ipv4/ipip.c
index 43adfc1641ba..2f01cf6fa0de 100644
--- a/net/ipv4/ipip.c
+++ b/net/ipv4/ipip.c
@@ -275,6 +275,9 @@ static netdev_tx_t ipip_tunnel_xmit(struct sk_buff *skb,
275 const struct iphdr *tiph = &tunnel->parms.iph; 275 const struct iphdr *tiph = &tunnel->parms.iph;
276 u8 ipproto; 276 u8 ipproto;
277 277
278 if (!pskb_inet_may_pull(skb))
279 goto tx_error;
280
278 switch (skb->protocol) { 281 switch (skb->protocol) {
279 case htons(ETH_P_IP): 282 case htons(ETH_P_IP):
280 ipproto = IPPROTO_IPIP; 283 ipproto = IPPROTO_IPIP;
diff --git a/net/ipv4/netfilter/ipt_CLUSTERIP.c b/net/ipv4/netfilter/ipt_CLUSTERIP.c
index 4d6bf7ac0792..6bdb1ab8af61 100644
--- a/net/ipv4/netfilter/ipt_CLUSTERIP.c
+++ b/net/ipv4/netfilter/ipt_CLUSTERIP.c
@@ -416,8 +416,8 @@ clusterip_tg(struct sk_buff *skb, const struct xt_action_param *par)
416 ctinfo == IP_CT_RELATED_REPLY)) 416 ctinfo == IP_CT_RELATED_REPLY))
417 return XT_CONTINUE; 417 return XT_CONTINUE;
418 418
419 /* ip_conntrack_icmp guarantees us that we only have ICMP_ECHO, 419 /* nf_conntrack_proto_icmp guarantees us that we only have ICMP_ECHO,
420 * TIMESTAMP, INFO_REQUEST or ADDRESS type icmp packets from here 420 * TIMESTAMP, INFO_REQUEST or ICMP_ADDRESS type icmp packets from here
421 * on, which all have an ID field [relevant for hashing]. */ 421 * on, which all have an ID field [relevant for hashing]. */
422 422
423 hash = clusterip_hashfn(skb, cipinfo->config); 423 hash = clusterip_hashfn(skb, cipinfo->config);
diff --git a/net/ipv4/netfilter/ipt_SYNPROXY.c b/net/ipv4/netfilter/ipt_SYNPROXY.c
index 8e7f84ec783d..0e70f3f65f6f 100644
--- a/net/ipv4/netfilter/ipt_SYNPROXY.c
+++ b/net/ipv4/netfilter/ipt_SYNPROXY.c
@@ -36,6 +36,8 @@ synproxy_tg4(struct sk_buff *skb, const struct xt_action_param *par)
36 opts.options |= XT_SYNPROXY_OPT_ECN; 36 opts.options |= XT_SYNPROXY_OPT_ECN;
37 37
38 opts.options &= info->options; 38 opts.options &= info->options;
39 opts.mss_encode = opts.mss;
40 opts.mss = info->mss;
39 if (opts.options & XT_SYNPROXY_OPT_TIMESTAMP) 41 if (opts.options & XT_SYNPROXY_OPT_TIMESTAMP)
40 synproxy_init_timestamp_cookie(info, &opts); 42 synproxy_init_timestamp_cookie(info, &opts);
41 else 43 else
diff --git a/net/ipv4/netfilter/ipt_rpfilter.c b/net/ipv4/netfilter/ipt_rpfilter.c
index 59031670b16a..cc23f1ce239c 100644
--- a/net/ipv4/netfilter/ipt_rpfilter.c
+++ b/net/ipv4/netfilter/ipt_rpfilter.c
@@ -78,6 +78,7 @@ static bool rpfilter_mt(const struct sk_buff *skb, struct xt_action_param *par)
78 flow.flowi4_mark = info->flags & XT_RPFILTER_VALID_MARK ? skb->mark : 0; 78 flow.flowi4_mark = info->flags & XT_RPFILTER_VALID_MARK ? skb->mark : 0;
79 flow.flowi4_tos = RT_TOS(iph->tos); 79 flow.flowi4_tos = RT_TOS(iph->tos);
80 flow.flowi4_scope = RT_SCOPE_UNIVERSE; 80 flow.flowi4_scope = RT_SCOPE_UNIVERSE;
81 flow.flowi4_oif = l3mdev_master_ifindex_rcu(xt_in(par));
81 82
82 return rpfilter_lookup_reverse(xt_net(par), &flow, xt_in(par), info->flags) ^ invert; 83 return rpfilter_lookup_reverse(xt_net(par), &flow, xt_in(par), info->flags) ^ invert;
83} 84}
diff --git a/net/ipv4/netfilter/nf_nat_h323.c b/net/ipv4/netfilter/nf_nat_h323.c
index 87b711fd5a44..3e2685c120c7 100644
--- a/net/ipv4/netfilter/nf_nat_h323.c
+++ b/net/ipv4/netfilter/nf_nat_h323.c
@@ -221,11 +221,11 @@ static int nat_rtp_rtcp(struct sk_buff *skb, struct nf_conn *ct,
221 int ret; 221 int ret;
222 222
223 rtp_exp->tuple.dst.u.udp.port = htons(nated_port); 223 rtp_exp->tuple.dst.u.udp.port = htons(nated_port);
224 ret = nf_ct_expect_related(rtp_exp); 224 ret = nf_ct_expect_related(rtp_exp, 0);
225 if (ret == 0) { 225 if (ret == 0) {
226 rtcp_exp->tuple.dst.u.udp.port = 226 rtcp_exp->tuple.dst.u.udp.port =
227 htons(nated_port + 1); 227 htons(nated_port + 1);
228 ret = nf_ct_expect_related(rtcp_exp); 228 ret = nf_ct_expect_related(rtcp_exp, 0);
229 if (ret == 0) 229 if (ret == 0)
230 break; 230 break;
231 else if (ret == -EBUSY) { 231 else if (ret == -EBUSY) {
@@ -296,7 +296,7 @@ static int nat_t120(struct sk_buff *skb, struct nf_conn *ct,
296 int ret; 296 int ret;
297 297
298 exp->tuple.dst.u.tcp.port = htons(nated_port); 298 exp->tuple.dst.u.tcp.port = htons(nated_port);
299 ret = nf_ct_expect_related(exp); 299 ret = nf_ct_expect_related(exp, 0);
300 if (ret == 0) 300 if (ret == 0)
301 break; 301 break;
302 else if (ret != -EBUSY) { 302 else if (ret != -EBUSY) {
@@ -352,7 +352,7 @@ static int nat_h245(struct sk_buff *skb, struct nf_conn *ct,
352 int ret; 352 int ret;
353 353
354 exp->tuple.dst.u.tcp.port = htons(nated_port); 354 exp->tuple.dst.u.tcp.port = htons(nated_port);
355 ret = nf_ct_expect_related(exp); 355 ret = nf_ct_expect_related(exp, 0);
356 if (ret == 0) 356 if (ret == 0)
357 break; 357 break;
358 else if (ret != -EBUSY) { 358 else if (ret != -EBUSY) {
@@ -444,7 +444,7 @@ static int nat_q931(struct sk_buff *skb, struct nf_conn *ct,
444 int ret; 444 int ret;
445 445
446 exp->tuple.dst.u.tcp.port = htons(nated_port); 446 exp->tuple.dst.u.tcp.port = htons(nated_port);
447 ret = nf_ct_expect_related(exp); 447 ret = nf_ct_expect_related(exp, 0);
448 if (ret == 0) 448 if (ret == 0)
449 break; 449 break;
450 else if (ret != -EBUSY) { 450 else if (ret != -EBUSY) {
@@ -537,7 +537,7 @@ static int nat_callforwarding(struct sk_buff *skb, struct nf_conn *ct,
537 int ret; 537 int ret;
538 538
539 exp->tuple.dst.u.tcp.port = htons(nated_port); 539 exp->tuple.dst.u.tcp.port = htons(nated_port);
540 ret = nf_ct_expect_related(exp); 540 ret = nf_ct_expect_related(exp, 0);
541 if (ret == 0) 541 if (ret == 0)
542 break; 542 break;
543 else if (ret != -EBUSY) { 543 else if (ret != -EBUSY) {
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index 517300d587a7..b6a6f18c3dd1 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -2728,7 +2728,8 @@ EXPORT_SYMBOL_GPL(ip_route_output_flow);
2728/* called with rcu_read_lock held */ 2728/* called with rcu_read_lock held */
2729static int rt_fill_info(struct net *net, __be32 dst, __be32 src, 2729static int rt_fill_info(struct net *net, __be32 dst, __be32 src,
2730 struct rtable *rt, u32 table_id, struct flowi4 *fl4, 2730 struct rtable *rt, u32 table_id, struct flowi4 *fl4,
2731 struct sk_buff *skb, u32 portid, u32 seq) 2731 struct sk_buff *skb, u32 portid, u32 seq,
2732 unsigned int flags)
2732{ 2733{
2733 struct rtmsg *r; 2734 struct rtmsg *r;
2734 struct nlmsghdr *nlh; 2735 struct nlmsghdr *nlh;
@@ -2736,7 +2737,7 @@ static int rt_fill_info(struct net *net, __be32 dst, __be32 src,
2736 u32 error; 2737 u32 error;
2737 u32 metrics[RTAX_MAX]; 2738 u32 metrics[RTAX_MAX];
2738 2739
2739 nlh = nlmsg_put(skb, portid, seq, RTM_NEWROUTE, sizeof(*r), 0); 2740 nlh = nlmsg_put(skb, portid, seq, RTM_NEWROUTE, sizeof(*r), flags);
2740 if (!nlh) 2741 if (!nlh)
2741 return -EMSGSIZE; 2742 return -EMSGSIZE;
2742 2743
@@ -2860,7 +2861,7 @@ nla_put_failure:
2860static int fnhe_dump_bucket(struct net *net, struct sk_buff *skb, 2861static int fnhe_dump_bucket(struct net *net, struct sk_buff *skb,
2861 struct netlink_callback *cb, u32 table_id, 2862 struct netlink_callback *cb, u32 table_id,
2862 struct fnhe_hash_bucket *bucket, int genid, 2863 struct fnhe_hash_bucket *bucket, int genid,
2863 int *fa_index, int fa_start) 2864 int *fa_index, int fa_start, unsigned int flags)
2864{ 2865{
2865 int i; 2866 int i;
2866 2867
@@ -2891,7 +2892,7 @@ static int fnhe_dump_bucket(struct net *net, struct sk_buff *skb,
2891 err = rt_fill_info(net, fnhe->fnhe_daddr, 0, rt, 2892 err = rt_fill_info(net, fnhe->fnhe_daddr, 0, rt,
2892 table_id, NULL, skb, 2893 table_id, NULL, skb,
2893 NETLINK_CB(cb->skb).portid, 2894 NETLINK_CB(cb->skb).portid,
2894 cb->nlh->nlmsg_seq); 2895 cb->nlh->nlmsg_seq, flags);
2895 if (err) 2896 if (err)
2896 return err; 2897 return err;
2897next: 2898next:
@@ -2904,7 +2905,7 @@ next:
2904 2905
2905int fib_dump_info_fnhe(struct sk_buff *skb, struct netlink_callback *cb, 2906int fib_dump_info_fnhe(struct sk_buff *skb, struct netlink_callback *cb,
2906 u32 table_id, struct fib_info *fi, 2907 u32 table_id, struct fib_info *fi,
2907 int *fa_index, int fa_start) 2908 int *fa_index, int fa_start, unsigned int flags)
2908{ 2909{
2909 struct net *net = sock_net(cb->skb->sk); 2910 struct net *net = sock_net(cb->skb->sk);
2910 int nhsel, genid = fnhe_genid(net); 2911 int nhsel, genid = fnhe_genid(net);
@@ -2922,7 +2923,8 @@ int fib_dump_info_fnhe(struct sk_buff *skb, struct netlink_callback *cb,
2922 err = 0; 2923 err = 0;
2923 if (bucket) 2924 if (bucket)
2924 err = fnhe_dump_bucket(net, skb, cb, table_id, bucket, 2925 err = fnhe_dump_bucket(net, skb, cb, table_id, bucket,
2925 genid, fa_index, fa_start); 2926 genid, fa_index, fa_start,
2927 flags);
2926 rcu_read_unlock(); 2928 rcu_read_unlock();
2927 if (err) 2929 if (err)
2928 return err; 2930 return err;
@@ -3183,7 +3185,8 @@ static int inet_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh,
3183 fl4.flowi4_tos, res.fi, 0); 3185 fl4.flowi4_tos, res.fi, 0);
3184 } else { 3186 } else {
3185 err = rt_fill_info(net, dst, src, rt, table_id, &fl4, skb, 3187 err = rt_fill_info(net, dst, src, rt, table_id, &fl4, skb,
3186 NETLINK_CB(in_skb).portid, nlh->nlmsg_seq); 3188 NETLINK_CB(in_skb).portid,
3189 nlh->nlmsg_seq, 0);
3187 } 3190 }
3188 if (err < 0) 3191 if (err < 0)
3189 goto errout_rcu; 3192 goto errout_rcu;
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 776905899ac0..61082065b26a 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -935,6 +935,22 @@ static int tcp_send_mss(struct sock *sk, int *size_goal, int flags)
935 return mss_now; 935 return mss_now;
936} 936}
937 937
938/* In some cases, both sendpage() and sendmsg() could have added
939 * an skb to the write queue, but failed adding payload on it.
940 * We need to remove it to consume less memory, but more
941 * importantly be able to generate EPOLLOUT for Edge Trigger epoll()
942 * users.
943 */
944static void tcp_remove_empty_skb(struct sock *sk, struct sk_buff *skb)
945{
946 if (skb && !skb->len) {
947 tcp_unlink_write_queue(skb, sk);
948 if (tcp_write_queue_empty(sk))
949 tcp_chrono_stop(sk, TCP_CHRONO_BUSY);
950 sk_wmem_free_skb(sk, skb);
951 }
952}
953
938ssize_t do_tcp_sendpages(struct sock *sk, struct page *page, int offset, 954ssize_t do_tcp_sendpages(struct sock *sk, struct page *page, int offset,
939 size_t size, int flags) 955 size_t size, int flags)
940{ 956{
@@ -984,6 +1000,9 @@ new_segment:
984 if (!skb) 1000 if (!skb)
985 goto wait_for_memory; 1001 goto wait_for_memory;
986 1002
1003#ifdef CONFIG_TLS_DEVICE
1004 skb->decrypted = !!(flags & MSG_SENDPAGE_DECRYPTED);
1005#endif
987 skb_entail(sk, skb); 1006 skb_entail(sk, skb);
988 copy = size_goal; 1007 copy = size_goal;
989 } 1008 }
@@ -1061,6 +1080,7 @@ out:
1061 return copied; 1080 return copied;
1062 1081
1063do_error: 1082do_error:
1083 tcp_remove_empty_skb(sk, tcp_write_queue_tail(sk));
1064 if (copied) 1084 if (copied)
1065 goto out; 1085 goto out;
1066out_err: 1086out_err:
@@ -1385,18 +1405,11 @@ out_nopush:
1385 sock_zerocopy_put(uarg); 1405 sock_zerocopy_put(uarg);
1386 return copied + copied_syn; 1406 return copied + copied_syn;
1387 1407
1408do_error:
1409 skb = tcp_write_queue_tail(sk);
1388do_fault: 1410do_fault:
1389 if (!skb->len) { 1411 tcp_remove_empty_skb(sk, skb);
1390 tcp_unlink_write_queue(skb, sk);
1391 /* It is the one place in all of TCP, except connection
1392 * reset, where we can be unlinking the send_head.
1393 */
1394 if (tcp_write_queue_empty(sk))
1395 tcp_chrono_stop(sk, TCP_CHRONO_BUSY);
1396 sk_wmem_free_skb(sk, skb);
1397 }
1398 1412
1399do_error:
1400 if (copied + copied_syn) 1413 if (copied + copied_syn)
1401 goto out; 1414 goto out;
1402out_err: 1415out_err:
diff --git a/net/ipv4/tcp_bpf.c b/net/ipv4/tcp_bpf.c
index 3d1e15401384..8a56e09cfb0e 100644
--- a/net/ipv4/tcp_bpf.c
+++ b/net/ipv4/tcp_bpf.c
@@ -398,10 +398,14 @@ more_data:
398static int tcp_bpf_sendmsg(struct sock *sk, struct msghdr *msg, size_t size) 398static int tcp_bpf_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
399{ 399{
400 struct sk_msg tmp, *msg_tx = NULL; 400 struct sk_msg tmp, *msg_tx = NULL;
401 int flags = msg->msg_flags | MSG_NO_SHARED_FRAGS;
402 int copied = 0, err = 0; 401 int copied = 0, err = 0;
403 struct sk_psock *psock; 402 struct sk_psock *psock;
404 long timeo; 403 long timeo;
404 int flags;
405
406 /* Don't let internal do_tcp_sendpages() flags through */
407 flags = (msg->msg_flags & ~MSG_SENDPAGE_DECRYPTED);
408 flags |= MSG_NO_SHARED_FRAGS;
405 409
406 psock = sk_psock_get(sk); 410 psock = sk_psock_get(sk);
407 if (unlikely(!psock)) 411 if (unlikely(!psock))
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 4af1f5dae9d3..8a645f304e6c 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -1288,6 +1288,7 @@ int tcp_fragment(struct sock *sk, enum tcp_queue tcp_queue,
1288 struct tcp_sock *tp = tcp_sk(sk); 1288 struct tcp_sock *tp = tcp_sk(sk);
1289 struct sk_buff *buff; 1289 struct sk_buff *buff;
1290 int nsize, old_factor; 1290 int nsize, old_factor;
1291 long limit;
1291 int nlen; 1292 int nlen;
1292 u8 flags; 1293 u8 flags;
1293 1294
@@ -1298,8 +1299,16 @@ int tcp_fragment(struct sock *sk, enum tcp_queue tcp_queue,
1298 if (nsize < 0) 1299 if (nsize < 0)
1299 nsize = 0; 1300 nsize = 0;
1300 1301
1301 if (unlikely((sk->sk_wmem_queued >> 1) > sk->sk_sndbuf && 1302 /* tcp_sendmsg() can overshoot sk_wmem_queued by one full size skb.
1302 tcp_queue != TCP_FRAG_IN_WRITE_QUEUE)) { 1303 * We need some allowance to not penalize applications setting small
1304 * SO_SNDBUF values.
1305 * Also allow first and last skb in retransmit queue to be split.
1306 */
1307 limit = sk->sk_sndbuf + 2 * SKB_TRUESIZE(GSO_MAX_SIZE);
1308 if (unlikely((sk->sk_wmem_queued >> 1) > limit &&
1309 tcp_queue != TCP_FRAG_IN_WRITE_QUEUE &&
1310 skb != tcp_rtx_queue_head(sk) &&
1311 skb != tcp_rtx_queue_tail(sk))) {
1303 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPWQUEUETOOBIG); 1312 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPWQUEUETOOBIG);
1304 return -ENOMEM; 1313 return -ENOMEM;
1305 } 1314 }
@@ -1311,6 +1320,7 @@ int tcp_fragment(struct sock *sk, enum tcp_queue tcp_queue,
1311 buff = sk_stream_alloc_skb(sk, nsize, gfp, true); 1320 buff = sk_stream_alloc_skb(sk, nsize, gfp, true);
1312 if (!buff) 1321 if (!buff)
1313 return -ENOMEM; /* We'll just try again later. */ 1322 return -ENOMEM; /* We'll just try again later. */
1323 skb_copy_decrypted(buff, skb);
1314 1324
1315 sk->sk_wmem_queued += buff->truesize; 1325 sk->sk_wmem_queued += buff->truesize;
1316 sk_mem_charge(sk, buff->truesize); 1326 sk_mem_charge(sk, buff->truesize);
@@ -1865,6 +1875,7 @@ static int tso_fragment(struct sock *sk, struct sk_buff *skb, unsigned int len,
1865 buff = sk_stream_alloc_skb(sk, 0, gfp, true); 1875 buff = sk_stream_alloc_skb(sk, 0, gfp, true);
1866 if (unlikely(!buff)) 1876 if (unlikely(!buff))
1867 return -ENOMEM; 1877 return -ENOMEM;
1878 skb_copy_decrypted(buff, skb);
1868 1879
1869 sk->sk_wmem_queued += buff->truesize; 1880 sk->sk_wmem_queued += buff->truesize;
1870 sk_mem_charge(sk, buff->truesize); 1881 sk_mem_charge(sk, buff->truesize);
@@ -2042,7 +2053,7 @@ static bool tcp_can_coalesce_send_queue_head(struct sock *sk, int len)
2042 if (len <= skb->len) 2053 if (len <= skb->len)
2043 break; 2054 break;
2044 2055
2045 if (unlikely(TCP_SKB_CB(skb)->eor)) 2056 if (unlikely(TCP_SKB_CB(skb)->eor) || tcp_has_tx_tstamp(skb))
2046 return false; 2057 return false;
2047 2058
2048 len -= skb->len; 2059 len -= skb->len;
@@ -2134,6 +2145,7 @@ static int tcp_mtu_probe(struct sock *sk)
2134 sk_mem_charge(sk, nskb->truesize); 2145 sk_mem_charge(sk, nskb->truesize);
2135 2146
2136 skb = tcp_send_head(sk); 2147 skb = tcp_send_head(sk);
2148 skb_copy_decrypted(nskb, skb);
2137 2149
2138 TCP_SKB_CB(nskb)->seq = TCP_SKB_CB(skb)->seq; 2150 TCP_SKB_CB(nskb)->seq = TCP_SKB_CB(skb)->seq;
2139 TCP_SKB_CB(nskb)->end_seq = TCP_SKB_CB(skb)->seq + probe_size; 2151 TCP_SKB_CB(nskb)->end_seq = TCP_SKB_CB(skb)->seq + probe_size;
@@ -2158,6 +2170,7 @@ static int tcp_mtu_probe(struct sock *sk)
2158 * we need to propagate it to the new skb. 2170 * we need to propagate it to the new skb.
2159 */ 2171 */
2160 TCP_SKB_CB(nskb)->eor = TCP_SKB_CB(skb)->eor; 2172 TCP_SKB_CB(nskb)->eor = TCP_SKB_CB(skb)->eor;
2173 tcp_skb_collapse_tstamp(nskb, skb);
2161 tcp_unlink_write_queue(skb, sk); 2174 tcp_unlink_write_queue(skb, sk);
2162 sk_wmem_free_skb(sk, skb); 2175 sk_wmem_free_skb(sk, skb);
2163 } else { 2176 } else {
diff --git a/net/ipv4/tcp_ulp.c b/net/ipv4/tcp_ulp.c
index 3d8a1d835471..4849edb62d52 100644
--- a/net/ipv4/tcp_ulp.c
+++ b/net/ipv4/tcp_ulp.c
@@ -96,6 +96,19 @@ void tcp_get_available_ulp(char *buf, size_t maxlen)
96 rcu_read_unlock(); 96 rcu_read_unlock();
97} 97}
98 98
99void tcp_update_ulp(struct sock *sk, struct proto *proto)
100{
101 struct inet_connection_sock *icsk = inet_csk(sk);
102
103 if (!icsk->icsk_ulp_ops) {
104 sk->sk_prot = proto;
105 return;
106 }
107
108 if (icsk->icsk_ulp_ops->update)
109 icsk->icsk_ulp_ops->update(sk, proto);
110}
111
99void tcp_cleanup_ulp(struct sock *sk) 112void tcp_cleanup_ulp(struct sock *sk)
100{ 113{
101 struct inet_connection_sock *icsk = inet_csk(sk); 114 struct inet_connection_sock *icsk = inet_csk(sk);
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index dc73888c7859..6a576ff92c39 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -478,7 +478,7 @@ static struct inet6_dev *ipv6_find_idev(struct net_device *dev)
478 if (!idev) { 478 if (!idev) {
479 idev = ipv6_add_dev(dev); 479 idev = ipv6_add_dev(dev);
480 if (IS_ERR(idev)) 480 if (IS_ERR(idev))
481 return NULL; 481 return idev;
482 } 482 }
483 483
484 if (dev->flags&IFF_UP) 484 if (dev->flags&IFF_UP)
@@ -1045,7 +1045,8 @@ ipv6_add_addr(struct inet6_dev *idev, struct ifa6_config *cfg,
1045 int err = 0; 1045 int err = 0;
1046 1046
1047 if (addr_type == IPV6_ADDR_ANY || 1047 if (addr_type == IPV6_ADDR_ANY ||
1048 addr_type & IPV6_ADDR_MULTICAST || 1048 (addr_type & IPV6_ADDR_MULTICAST &&
1049 !(cfg->ifa_flags & IFA_F_MCAUTOJOIN)) ||
1049 (!(idev->dev->flags & IFF_LOOPBACK) && 1050 (!(idev->dev->flags & IFF_LOOPBACK) &&
1050 !netif_is_l3_master(idev->dev) && 1051 !netif_is_l3_master(idev->dev) &&
1051 addr_type & IPV6_ADDR_LOOPBACK)) 1052 addr_type & IPV6_ADDR_LOOPBACK))
@@ -2465,8 +2466,8 @@ static struct inet6_dev *addrconf_add_dev(struct net_device *dev)
2465 ASSERT_RTNL(); 2466 ASSERT_RTNL();
2466 2467
2467 idev = ipv6_find_idev(dev); 2468 idev = ipv6_find_idev(dev);
2468 if (!idev) 2469 if (IS_ERR(idev))
2469 return ERR_PTR(-ENOBUFS); 2470 return idev;
2470 2471
2471 if (idev->cnf.disable_ipv6) 2472 if (idev->cnf.disable_ipv6)
2472 return ERR_PTR(-EACCES); 2473 return ERR_PTR(-EACCES);
@@ -3158,7 +3159,7 @@ static void init_loopback(struct net_device *dev)
3158 ASSERT_RTNL(); 3159 ASSERT_RTNL();
3159 3160
3160 idev = ipv6_find_idev(dev); 3161 idev = ipv6_find_idev(dev);
3161 if (!idev) { 3162 if (IS_ERR(idev)) {
3162 pr_debug("%s: add_dev failed\n", __func__); 3163 pr_debug("%s: add_dev failed\n", __func__);
3163 return; 3164 return;
3164 } 3165 }
@@ -3373,7 +3374,7 @@ static void addrconf_sit_config(struct net_device *dev)
3373 */ 3374 */
3374 3375
3375 idev = ipv6_find_idev(dev); 3376 idev = ipv6_find_idev(dev);
3376 if (!idev) { 3377 if (IS_ERR(idev)) {
3377 pr_debug("%s: add_dev failed\n", __func__); 3378 pr_debug("%s: add_dev failed\n", __func__);
3378 return; 3379 return;
3379 } 3380 }
@@ -3398,7 +3399,7 @@ static void addrconf_gre_config(struct net_device *dev)
3398 ASSERT_RTNL(); 3399 ASSERT_RTNL();
3399 3400
3400 idev = ipv6_find_idev(dev); 3401 idev = ipv6_find_idev(dev);
3401 if (!idev) { 3402 if (IS_ERR(idev)) {
3402 pr_debug("%s: add_dev failed\n", __func__); 3403 pr_debug("%s: add_dev failed\n", __func__);
3403 return; 3404 return;
3404 } 3405 }
@@ -4772,8 +4773,8 @@ inet6_rtm_newaddr(struct sk_buff *skb, struct nlmsghdr *nlh,
4772 IFA_F_MCAUTOJOIN | IFA_F_OPTIMISTIC; 4773 IFA_F_MCAUTOJOIN | IFA_F_OPTIMISTIC;
4773 4774
4774 idev = ipv6_find_idev(dev); 4775 idev = ipv6_find_idev(dev);
4775 if (!idev) 4776 if (IS_ERR(idev))
4776 return -ENOBUFS; 4777 return PTR_ERR(idev);
4777 4778
4778 if (!ipv6_allow_optimistic_dad(net, idev)) 4779 if (!ipv6_allow_optimistic_dad(net, idev))
4779 cfg.ifa_flags &= ~IFA_F_OPTIMISTIC; 4780 cfg.ifa_flags &= ~IFA_F_OPTIMISTIC;
diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c
index c2049c72f3e5..dd2d0b963260 100644
--- a/net/ipv6/ip6_gre.c
+++ b/net/ipv6/ip6_gre.c
@@ -660,12 +660,13 @@ static int prepare_ip6gre_xmit_ipv6(struct sk_buff *skb,
660 struct flowi6 *fl6, __u8 *dsfield, 660 struct flowi6 *fl6, __u8 *dsfield,
661 int *encap_limit) 661 int *encap_limit)
662{ 662{
663 struct ipv6hdr *ipv6h = ipv6_hdr(skb); 663 struct ipv6hdr *ipv6h;
664 struct ip6_tnl *t = netdev_priv(dev); 664 struct ip6_tnl *t = netdev_priv(dev);
665 __u16 offset; 665 __u16 offset;
666 666
667 offset = ip6_tnl_parse_tlv_enc_lim(skb, skb_network_header(skb)); 667 offset = ip6_tnl_parse_tlv_enc_lim(skb, skb_network_header(skb));
668 /* ip6_tnl_parse_tlv_enc_lim() might have reallocated skb->head */ 668 /* ip6_tnl_parse_tlv_enc_lim() might have reallocated skb->head */
669 ipv6h = ipv6_hdr(skb);
669 670
670 if (offset > 0) { 671 if (offset > 0) {
671 struct ipv6_tlv_tnl_enc_lim *tel; 672 struct ipv6_tlv_tnl_enc_lim *tel;
diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
index 3134fbb65d7f..754a484d35df 100644
--- a/net/ipv6/ip6_tunnel.c
+++ b/net/ipv6/ip6_tunnel.c
@@ -1278,12 +1278,11 @@ ip4ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
1278 } 1278 }
1279 1279
1280 fl6.flowi6_uid = sock_net_uid(dev_net(dev), NULL); 1280 fl6.flowi6_uid = sock_net_uid(dev_net(dev), NULL);
1281 dsfield = INET_ECN_encapsulate(dsfield, ipv4_get_dsfield(iph));
1281 1282
1282 if (iptunnel_handle_offloads(skb, SKB_GSO_IPXIP6)) 1283 if (iptunnel_handle_offloads(skb, SKB_GSO_IPXIP6))
1283 return -1; 1284 return -1;
1284 1285
1285 dsfield = INET_ECN_encapsulate(dsfield, ipv4_get_dsfield(iph));
1286
1287 skb_set_inner_ipproto(skb, IPPROTO_IPIP); 1286 skb_set_inner_ipproto(skb, IPPROTO_IPIP);
1288 1287
1289 err = ip6_tnl_xmit(skb, dev, dsfield, &fl6, encap_limit, &mtu, 1288 err = ip6_tnl_xmit(skb, dev, dsfield, &fl6, encap_limit, &mtu,
@@ -1367,12 +1366,11 @@ ip6ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
1367 } 1366 }
1368 1367
1369 fl6.flowi6_uid = sock_net_uid(dev_net(dev), NULL); 1368 fl6.flowi6_uid = sock_net_uid(dev_net(dev), NULL);
1369 dsfield = INET_ECN_encapsulate(dsfield, ipv6_get_dsfield(ipv6h));
1370 1370
1371 if (iptunnel_handle_offloads(skb, SKB_GSO_IPXIP6)) 1371 if (iptunnel_handle_offloads(skb, SKB_GSO_IPXIP6))
1372 return -1; 1372 return -1;
1373 1373
1374 dsfield = INET_ECN_encapsulate(dsfield, ipv6_get_dsfield(ipv6h));
1375
1376 skb_set_inner_ipproto(skb, IPPROTO_IPV6); 1374 skb_set_inner_ipproto(skb, IPPROTO_IPV6);
1377 1375
1378 err = ip6_tnl_xmit(skb, dev, dsfield, &fl6, encap_limit, &mtu, 1376 err = ip6_tnl_xmit(skb, dev, dsfield, &fl6, encap_limit, &mtu,
diff --git a/net/ipv6/mcast.c b/net/ipv6/mcast.c
index 7f3f13c37916..eaa4c2cc2fbb 100644
--- a/net/ipv6/mcast.c
+++ b/net/ipv6/mcast.c
@@ -787,14 +787,15 @@ static void mld_del_delrec(struct inet6_dev *idev, struct ifmcaddr6 *im)
787 if (pmc) { 787 if (pmc) {
788 im->idev = pmc->idev; 788 im->idev = pmc->idev;
789 if (im->mca_sfmode == MCAST_INCLUDE) { 789 if (im->mca_sfmode == MCAST_INCLUDE) {
790 im->mca_tomb = pmc->mca_tomb; 790 swap(im->mca_tomb, pmc->mca_tomb);
791 im->mca_sources = pmc->mca_sources; 791 swap(im->mca_sources, pmc->mca_sources);
792 for (psf = im->mca_sources; psf; psf = psf->sf_next) 792 for (psf = im->mca_sources; psf; psf = psf->sf_next)
793 psf->sf_crcount = idev->mc_qrv; 793 psf->sf_crcount = idev->mc_qrv;
794 } else { 794 } else {
795 im->mca_crcount = idev->mc_qrv; 795 im->mca_crcount = idev->mc_qrv;
796 } 796 }
797 in6_dev_put(pmc->idev); 797 in6_dev_put(pmc->idev);
798 ip6_mc_clear_src(pmc);
798 kfree(pmc); 799 kfree(pmc);
799 } 800 }
800 spin_unlock_bh(&im->mca_lock); 801 spin_unlock_bh(&im->mca_lock);
diff --git a/net/ipv6/netfilter/ip6t_SYNPROXY.c b/net/ipv6/netfilter/ip6t_SYNPROXY.c
index e77ea1ed5edd..5cdb4a69d277 100644
--- a/net/ipv6/netfilter/ip6t_SYNPROXY.c
+++ b/net/ipv6/netfilter/ip6t_SYNPROXY.c
@@ -36,6 +36,8 @@ synproxy_tg6(struct sk_buff *skb, const struct xt_action_param *par)
36 opts.options |= XT_SYNPROXY_OPT_ECN; 36 opts.options |= XT_SYNPROXY_OPT_ECN;
37 37
38 opts.options &= info->options; 38 opts.options &= info->options;
39 opts.mss_encode = opts.mss;
40 opts.mss = info->mss;
39 if (opts.options & XT_SYNPROXY_OPT_TIMESTAMP) 41 if (opts.options & XT_SYNPROXY_OPT_TIMESTAMP)
40 synproxy_init_timestamp_cookie(info, &opts); 42 synproxy_init_timestamp_cookie(info, &opts);
41 else 43 else
diff --git a/net/ipv6/netfilter/ip6t_rpfilter.c b/net/ipv6/netfilter/ip6t_rpfilter.c
index 6bcaf7357183..d800801a5dd2 100644
--- a/net/ipv6/netfilter/ip6t_rpfilter.c
+++ b/net/ipv6/netfilter/ip6t_rpfilter.c
@@ -55,7 +55,9 @@ static bool rpfilter_lookup_reverse6(struct net *net, const struct sk_buff *skb,
55 if (rpfilter_addr_linklocal(&iph->saddr)) { 55 if (rpfilter_addr_linklocal(&iph->saddr)) {
56 lookup_flags |= RT6_LOOKUP_F_IFACE; 56 lookup_flags |= RT6_LOOKUP_F_IFACE;
57 fl6.flowi6_oif = dev->ifindex; 57 fl6.flowi6_oif = dev->ifindex;
58 } else if ((flags & XT_RPFILTER_LOOSE) == 0) 58 /* Set flowi6_oif for vrf devices to lookup route in l3mdev domain. */
59 } else if (netif_is_l3_master(dev) || netif_is_l3_slave(dev) ||
60 (flags & XT_RPFILTER_LOOSE) == 0)
59 fl6.flowi6_oif = dev->ifindex; 61 fl6.flowi6_oif = dev->ifindex;
60 62
61 rt = (void *)ip6_route_lookup(net, &fl6, skb, lookup_flags); 63 rt = (void *)ip6_route_lookup(net, &fl6, skb, lookup_flags);
@@ -70,7 +72,9 @@ static bool rpfilter_lookup_reverse6(struct net *net, const struct sk_buff *skb,
70 goto out; 72 goto out;
71 } 73 }
72 74
73 if (rt->rt6i_idev->dev == dev || (flags & XT_RPFILTER_LOOSE)) 75 if (rt->rt6i_idev->dev == dev ||
76 l3mdev_master_ifindex_rcu(rt->rt6i_idev->dev) == dev->ifindex ||
77 (flags & XT_RPFILTER_LOOSE))
74 ret = true; 78 ret = true;
75 out: 79 out:
76 ip6_rt_put(rt); 80 ip6_rt_put(rt);
diff --git a/net/ipv6/netfilter/nf_conntrack_reasm.c b/net/ipv6/netfilter/nf_conntrack_reasm.c
index 0f82c150543b..fed9666a2f7d 100644
--- a/net/ipv6/netfilter/nf_conntrack_reasm.c
+++ b/net/ipv6/netfilter/nf_conntrack_reasm.c
@@ -348,7 +348,7 @@ static int nf_ct_frag6_reasm(struct frag_queue *fq, struct sk_buff *skb,
348 348
349 skb_reset_transport_header(skb); 349 skb_reset_transport_header(skb);
350 350
351 inet_frag_reasm_finish(&fq->q, skb, reasm_data); 351 inet_frag_reasm_finish(&fq->q, skb, reasm_data, false);
352 352
353 skb->ignore_df = 1; 353 skb->ignore_df = 1;
354 skb->dev = dev; 354 skb->dev = dev;
diff --git a/net/ipv6/reassembly.c b/net/ipv6/reassembly.c
index ca05b16f1bb9..1f5d4d196dcc 100644
--- a/net/ipv6/reassembly.c
+++ b/net/ipv6/reassembly.c
@@ -282,7 +282,7 @@ static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *skb,
282 282
283 skb_reset_transport_header(skb); 283 skb_reset_transport_header(skb);
284 284
285 inet_frag_reasm_finish(&fq->q, skb, reasm_data); 285 inet_frag_reasm_finish(&fq->q, skb, reasm_data, true);
286 286
287 skb->dev = dev; 287 skb->dev = dev;
288 ipv6_hdr(skb)->payload_len = htons(payload_len); 288 ipv6_hdr(skb)->payload_len = htons(payload_len);
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index e49fec767a10..fd059e08785a 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -1951,7 +1951,7 @@ static void rt6_update_exception_stamp_rt(struct rt6_info *rt)
1951 nexthop_for_each_fib6_nh(from->nh, fib6_nh_find_match, &arg); 1951 nexthop_for_each_fib6_nh(from->nh, fib6_nh_find_match, &arg);
1952 1952
1953 if (!arg.match) 1953 if (!arg.match)
1954 return; 1954 goto unlock;
1955 fib6_nh = arg.match; 1955 fib6_nh = arg.match;
1956 } else { 1956 } else {
1957 fib6_nh = from->fib6_nh; 1957 fib6_nh = from->fib6_nh;
diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c
index 09e1694b6d34..ebb62a4ebe30 100644
--- a/net/iucv/af_iucv.c
+++ b/net/iucv/af_iucv.c
@@ -512,7 +512,9 @@ static void iucv_sock_close(struct sock *sk)
512 sk->sk_state = IUCV_DISCONN; 512 sk->sk_state = IUCV_DISCONN;
513 sk->sk_state_change(sk); 513 sk->sk_state_change(sk);
514 } 514 }
515 case IUCV_DISCONN: /* fall through */ 515 /* fall through */
516
517 case IUCV_DISCONN:
516 sk->sk_state = IUCV_CLOSING; 518 sk->sk_state = IUCV_CLOSING;
517 sk->sk_state_change(sk); 519 sk->sk_state_change(sk);
518 520
@@ -525,8 +527,9 @@ static void iucv_sock_close(struct sock *sk)
525 iucv_sock_in_state(sk, IUCV_CLOSED, 0), 527 iucv_sock_in_state(sk, IUCV_CLOSED, 0),
526 timeo); 528 timeo);
527 } 529 }
530 /* fall through */
528 531
529 case IUCV_CLOSING: /* fall through */ 532 case IUCV_CLOSING:
530 sk->sk_state = IUCV_CLOSED; 533 sk->sk_state = IUCV_CLOSED;
531 sk->sk_state_change(sk); 534 sk->sk_state_change(sk);
532 535
@@ -535,8 +538,9 @@ static void iucv_sock_close(struct sock *sk)
535 538
536 skb_queue_purge(&iucv->send_skb_q); 539 skb_queue_purge(&iucv->send_skb_q);
537 skb_queue_purge(&iucv->backlog_skb_q); 540 skb_queue_purge(&iucv->backlog_skb_q);
541 /* fall through */
538 542
539 default: /* fall through */ 543 default:
540 iucv_sever_path(sk, 1); 544 iucv_sever_path(sk, 1);
541 } 545 }
542 546
@@ -2247,10 +2251,10 @@ static int afiucv_hs_rcv(struct sk_buff *skb, struct net_device *dev,
2247 kfree_skb(skb); 2251 kfree_skb(skb);
2248 break; 2252 break;
2249 } 2253 }
2250 /* fall through and receive non-zero length data */ 2254 /* fall through - and receive non-zero length data */
2251 case (AF_IUCV_FLAG_SHT): 2255 case (AF_IUCV_FLAG_SHT):
2252 /* shutdown request */ 2256 /* shutdown request */
2253 /* fall through and receive zero length data */ 2257 /* fall through - and receive zero length data */
2254 case 0: 2258 case 0:
2255 /* plain data frame */ 2259 /* plain data frame */
2256 IUCV_SKB_CB(skb)->class = trans_hdr->iucv_hdr.class; 2260 IUCV_SKB_CB(skb)->class = trans_hdr->iucv_hdr.class;
diff --git a/net/l2tp/l2tp_ppp.c b/net/l2tp/l2tp_ppp.c
index 1d0e5904dedf..c54cb59593ef 100644
--- a/net/l2tp/l2tp_ppp.c
+++ b/net/l2tp/l2tp_ppp.c
@@ -1681,6 +1681,9 @@ static const struct proto_ops pppol2tp_ops = {
1681 .recvmsg = pppol2tp_recvmsg, 1681 .recvmsg = pppol2tp_recvmsg,
1682 .mmap = sock_no_mmap, 1682 .mmap = sock_no_mmap,
1683 .ioctl = pppox_ioctl, 1683 .ioctl = pppox_ioctl,
1684#ifdef CONFIG_COMPAT
1685 .compat_ioctl = pppox_compat_ioctl,
1686#endif
1684}; 1687};
1685 1688
1686static const struct pppox_proto pppol2tp_proto = { 1689static const struct pppox_proto pppol2tp_proto = {
diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
index 76cc9e967fa6..111c400199ec 100644
--- a/net/mac80211/cfg.c
+++ b/net/mac80211/cfg.c
@@ -936,8 +936,10 @@ static int ieee80211_assign_beacon(struct ieee80211_sub_if_data *sdata,
936 936
937 err = ieee80211_set_probe_resp(sdata, params->probe_resp, 937 err = ieee80211_set_probe_resp(sdata, params->probe_resp,
938 params->probe_resp_len, csa); 938 params->probe_resp_len, csa);
939 if (err < 0) 939 if (err < 0) {
940 kfree(new);
940 return err; 941 return err;
942 }
941 if (err == 0) 943 if (err == 0)
942 changed |= BSS_CHANGED_AP_PROBE_RESP; 944 changed |= BSS_CHANGED_AP_PROBE_RESP;
943 945
@@ -949,8 +951,10 @@ static int ieee80211_assign_beacon(struct ieee80211_sub_if_data *sdata,
949 params->civicloc, 951 params->civicloc,
950 params->civicloc_len); 952 params->civicloc_len);
951 953
952 if (err < 0) 954 if (err < 0) {
955 kfree(new);
953 return err; 956 return err;
957 }
954 958
955 changed |= BSS_CHANGED_FTM_RESPONDER; 959 changed |= BSS_CHANGED_FTM_RESPONDER;
956 } 960 }
@@ -1542,6 +1546,11 @@ static int ieee80211_add_station(struct wiphy *wiphy, struct net_device *dev,
1542 if (is_multicast_ether_addr(mac)) 1546 if (is_multicast_ether_addr(mac))
1543 return -EINVAL; 1547 return -EINVAL;
1544 1548
1549 if (params->sta_flags_set & BIT(NL80211_STA_FLAG_TDLS_PEER) &&
1550 sdata->vif.type == NL80211_IFTYPE_STATION &&
1551 !sdata->u.mgd.associated)
1552 return -EINVAL;
1553
1545 sta = sta_info_alloc(sdata, mac, GFP_KERNEL); 1554 sta = sta_info_alloc(sdata, mac, GFP_KERNEL);
1546 if (!sta) 1555 if (!sta)
1547 return -ENOMEM; 1556 return -ENOMEM;
@@ -1549,10 +1558,6 @@ static int ieee80211_add_station(struct wiphy *wiphy, struct net_device *dev,
1549 if (params->sta_flags_set & BIT(NL80211_STA_FLAG_TDLS_PEER)) 1558 if (params->sta_flags_set & BIT(NL80211_STA_FLAG_TDLS_PEER))
1550 sta->sta.tdls = true; 1559 sta->sta.tdls = true;
1551 1560
1552 if (sta->sta.tdls && sdata->vif.type == NL80211_IFTYPE_STATION &&
1553 !sdata->u.mgd.associated)
1554 return -EINVAL;
1555
1556 err = sta_apply_parameters(local, sta, params); 1561 err = sta_apply_parameters(local, sta, params);
1557 if (err) { 1562 if (err) {
1558 sta_info_free(local, sta); 1563 sta_info_free(local, sta);
diff --git a/net/mac80211/driver-ops.c b/net/mac80211/driver-ops.c
index acd4afb4944b..c9a8a2433e8a 100644
--- a/net/mac80211/driver-ops.c
+++ b/net/mac80211/driver-ops.c
@@ -187,11 +187,16 @@ int drv_conf_tx(struct ieee80211_local *local,
187 if (!check_sdata_in_driver(sdata)) 187 if (!check_sdata_in_driver(sdata))
188 return -EIO; 188 return -EIO;
189 189
190 if (WARN_ONCE(params->cw_min == 0 || 190 if (params->cw_min == 0 || params->cw_min > params->cw_max) {
191 params->cw_min > params->cw_max, 191 /*
192 "%s: invalid CW_min/CW_max: %d/%d\n", 192 * If we can't configure hardware anyway, don't warn. We may
193 sdata->name, params->cw_min, params->cw_max)) 193 * never have initialized the CW parameters.
194 */
195 WARN_ONCE(local->ops->conf_tx,
196 "%s: invalid CW_min/CW_max: %d/%d\n",
197 sdata->name, params->cw_min, params->cw_max);
194 return -EINVAL; 198 return -EINVAL;
199 }
195 200
196 trace_drv_conf_tx(local, sdata, ac, params); 201 trace_drv_conf_tx(local, sdata, ac, params);
197 if (local->ops->conf_tx) 202 if (local->ops->conf_tx)
diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
index 06aac0aaae64..8dc6580e1787 100644
--- a/net/mac80211/iface.c
+++ b/net/mac80211/iface.c
@@ -1222,7 +1222,6 @@ static void ieee80211_if_setup(struct net_device *dev)
1222static void ieee80211_if_setup_no_queue(struct net_device *dev) 1222static void ieee80211_if_setup_no_queue(struct net_device *dev)
1223{ 1223{
1224 ieee80211_if_setup(dev); 1224 ieee80211_if_setup(dev);
1225 dev->features |= NETIF_F_LLTX;
1226 dev->priv_flags |= IFF_NO_QUEUE; 1225 dev->priv_flags |= IFF_NO_QUEUE;
1227} 1226}
1228 1227
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
index a99ad0325309..4c888dc9bd81 100644
--- a/net/mac80211/mlme.c
+++ b/net/mac80211/mlme.c
@@ -2042,6 +2042,16 @@ ieee80211_sta_wmm_params(struct ieee80211_local *local,
2042 ieee80211_regulatory_limit_wmm_params(sdata, &params[ac], ac); 2042 ieee80211_regulatory_limit_wmm_params(sdata, &params[ac], ac);
2043 } 2043 }
2044 2044
2045 /* WMM specification requires all 4 ACIs. */
2046 for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
2047 if (params[ac].cw_min == 0) {
2048 sdata_info(sdata,
2049 "AP has invalid WMM params (missing AC %d), using defaults\n",
2050 ac);
2051 return false;
2052 }
2053 }
2054
2045 for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) { 2055 for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
2046 mlme_dbg(sdata, 2056 mlme_dbg(sdata,
2047 "WMM AC=%d acm=%d aifs=%d cWmin=%d cWmax=%d txop=%d uapsd=%d, downgraded=%d\n", 2057 "WMM AC=%d acm=%d aifs=%d cWmin=%d cWmax=%d txop=%d uapsd=%d, downgraded=%d\n",
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
index 3c1ab870fefe..768d14c9a716 100644
--- a/net/mac80211/rx.c
+++ b/net/mac80211/rx.c
@@ -2447,11 +2447,13 @@ static void ieee80211_deliver_skb_to_local_stack(struct sk_buff *skb,
2447 skb->protocol == cpu_to_be16(ETH_P_PREAUTH)) && 2447 skb->protocol == cpu_to_be16(ETH_P_PREAUTH)) &&
2448 sdata->control_port_over_nl80211)) { 2448 sdata->control_port_over_nl80211)) {
2449 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb); 2449 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
2450 bool noencrypt = status->flag & RX_FLAG_DECRYPTED; 2450 bool noencrypt = !(status->flag & RX_FLAG_DECRYPTED);
2451 2451
2452 cfg80211_rx_control_port(dev, skb, noencrypt); 2452 cfg80211_rx_control_port(dev, skb, noencrypt);
2453 dev_kfree_skb(skb); 2453 dev_kfree_skb(skb);
2454 } else { 2454 } else {
2455 memset(skb->cb, 0, sizeof(skb->cb));
2456
2455 /* deliver to local stack */ 2457 /* deliver to local stack */
2456 if (rx->napi) 2458 if (rx->napi)
2457 napi_gro_receive(rx->napi, skb); 2459 napi_gro_receive(rx->napi, skb);
@@ -2546,8 +2548,6 @@ ieee80211_deliver_skb(struct ieee80211_rx_data *rx)
2546 2548
2547 if (skb) { 2549 if (skb) {
2548 skb->protocol = eth_type_trans(skb, dev); 2550 skb->protocol = eth_type_trans(skb, dev);
2549 memset(skb->cb, 0, sizeof(skb->cb));
2550
2551 ieee80211_deliver_skb_to_local_stack(skb, rx); 2551 ieee80211_deliver_skb_to_local_stack(skb, rx);
2552 } 2552 }
2553 2553
diff --git a/net/mac80211/util.c b/net/mac80211/util.c
index 1b224fa27367..ad1e58184c4e 100644
--- a/net/mac80211/util.c
+++ b/net/mac80211/util.c
@@ -3796,9 +3796,7 @@ int ieee80211_check_combinations(struct ieee80211_sub_if_data *sdata,
3796 } 3796 }
3797 3797
3798 /* Always allow software iftypes */ 3798 /* Always allow software iftypes */
3799 if (local->hw.wiphy->software_iftypes & BIT(iftype) || 3799 if (cfg80211_iftype_allowed(local->hw.wiphy, iftype, 0, 1)) {
3800 (iftype == NL80211_IFTYPE_AP_VLAN &&
3801 local->hw.wiphy->flags & WIPHY_FLAG_4ADDR_AP)) {
3802 if (radar_detect) 3800 if (radar_detect)
3803 return -EINVAL; 3801 return -EINVAL;
3804 return 0; 3802 return 0;
@@ -3833,7 +3831,8 @@ int ieee80211_check_combinations(struct ieee80211_sub_if_data *sdata,
3833 3831
3834 if (sdata_iter == sdata || 3832 if (sdata_iter == sdata ||
3835 !ieee80211_sdata_running(sdata_iter) || 3833 !ieee80211_sdata_running(sdata_iter) ||
3836 local->hw.wiphy->software_iftypes & BIT(wdev_iter->iftype)) 3834 cfg80211_iftype_allowed(local->hw.wiphy,
3835 wdev_iter->iftype, 0, 1))
3837 continue; 3836 continue;
3838 3837
3839 params.iftype_num[wdev_iter->iftype]++; 3838 params.iftype_num[wdev_iter->iftype]++;
diff --git a/net/mpls/mpls_iptunnel.c b/net/mpls/mpls_iptunnel.c
index d25e91d7bdc1..44b675016393 100644
--- a/net/mpls/mpls_iptunnel.c
+++ b/net/mpls/mpls_iptunnel.c
@@ -133,12 +133,12 @@ static int mpls_xmit(struct sk_buff *skb)
133 mpls_stats_inc_outucastpkts(out_dev, skb); 133 mpls_stats_inc_outucastpkts(out_dev, skb);
134 134
135 if (rt) { 135 if (rt) {
136 if (rt->rt_gw_family == AF_INET) 136 if (rt->rt_gw_family == AF_INET6)
137 err = neigh_xmit(NEIGH_ARP_TABLE, out_dev, &rt->rt_gw4,
138 skb);
139 else if (rt->rt_gw_family == AF_INET6)
140 err = neigh_xmit(NEIGH_ND_TABLE, out_dev, &rt->rt_gw6, 137 err = neigh_xmit(NEIGH_ND_TABLE, out_dev, &rt->rt_gw6,
141 skb); 138 skb);
139 else
140 err = neigh_xmit(NEIGH_ARP_TABLE, out_dev, &rt->rt_gw4,
141 skb);
142 } else if (rt6) { 142 } else if (rt6) {
143 if (ipv6_addr_v4mapped(&rt6->rt6i_gateway)) { 143 if (ipv6_addr_v4mapped(&rt6->rt6i_gateway)) {
144 /* 6PE (RFC 4798) */ 144 /* 6PE (RFC 4798) */
diff --git a/net/ncsi/ncsi-cmd.c b/net/ncsi/ncsi-cmd.c
index 5c3fad8cba57..0187e65176c0 100644
--- a/net/ncsi/ncsi-cmd.c
+++ b/net/ncsi/ncsi-cmd.c
@@ -54,7 +54,7 @@ static void ncsi_cmd_build_header(struct ncsi_pkt_hdr *h,
54 checksum = ncsi_calculate_checksum((unsigned char *)h, 54 checksum = ncsi_calculate_checksum((unsigned char *)h,
55 sizeof(*h) + nca->payload); 55 sizeof(*h) + nca->payload);
56 pchecksum = (__be32 *)((void *)h + sizeof(struct ncsi_pkt_hdr) + 56 pchecksum = (__be32 *)((void *)h + sizeof(struct ncsi_pkt_hdr) +
57 nca->payload); 57 ALIGN(nca->payload, 4));
58 *pchecksum = htonl(checksum); 58 *pchecksum = htonl(checksum);
59} 59}
60 60
@@ -309,14 +309,21 @@ static struct ncsi_request *ncsi_alloc_command(struct ncsi_cmd_arg *nca)
309 309
310int ncsi_xmit_cmd(struct ncsi_cmd_arg *nca) 310int ncsi_xmit_cmd(struct ncsi_cmd_arg *nca)
311{ 311{
312 struct ncsi_cmd_handler *nch = NULL;
312 struct ncsi_request *nr; 313 struct ncsi_request *nr;
314 unsigned char type;
313 struct ethhdr *eh; 315 struct ethhdr *eh;
314 struct ncsi_cmd_handler *nch = NULL;
315 int i, ret; 316 int i, ret;
316 317
318 /* Use OEM generic handler for Netlink request */
319 if (nca->req_flags == NCSI_REQ_FLAG_NETLINK_DRIVEN)
320 type = NCSI_PKT_CMD_OEM;
321 else
322 type = nca->type;
323
317 /* Search for the handler */ 324 /* Search for the handler */
318 for (i = 0; i < ARRAY_SIZE(ncsi_cmd_handlers); i++) { 325 for (i = 0; i < ARRAY_SIZE(ncsi_cmd_handlers); i++) {
319 if (ncsi_cmd_handlers[i].type == nca->type) { 326 if (ncsi_cmd_handlers[i].type == type) {
320 if (ncsi_cmd_handlers[i].handler) 327 if (ncsi_cmd_handlers[i].handler)
321 nch = &ncsi_cmd_handlers[i]; 328 nch = &ncsi_cmd_handlers[i];
322 else 329 else
diff --git a/net/ncsi/ncsi-rsp.c b/net/ncsi/ncsi-rsp.c
index 7581bf919885..d876bd55f356 100644
--- a/net/ncsi/ncsi-rsp.c
+++ b/net/ncsi/ncsi-rsp.c
@@ -47,7 +47,8 @@ static int ncsi_validate_rsp_pkt(struct ncsi_request *nr,
47 if (ntohs(h->code) != NCSI_PKT_RSP_C_COMPLETED || 47 if (ntohs(h->code) != NCSI_PKT_RSP_C_COMPLETED ||
48 ntohs(h->reason) != NCSI_PKT_RSP_R_NO_ERROR) { 48 ntohs(h->reason) != NCSI_PKT_RSP_R_NO_ERROR) {
49 netdev_dbg(nr->ndp->ndev.dev, 49 netdev_dbg(nr->ndp->ndev.dev,
50 "NCSI: non zero response/reason code\n"); 50 "NCSI: non zero response/reason code %04xh, %04xh\n",
51 ntohs(h->code), ntohs(h->reason));
51 return -EPERM; 52 return -EPERM;
52 } 53 }
53 54
@@ -55,7 +56,7 @@ static int ncsi_validate_rsp_pkt(struct ncsi_request *nr,
55 * sender doesn't support checksum according to NCSI 56 * sender doesn't support checksum according to NCSI
56 * specification. 57 * specification.
57 */ 58 */
58 pchecksum = (__be32 *)((void *)(h + 1) + payload - 4); 59 pchecksum = (__be32 *)((void *)(h + 1) + ALIGN(payload, 4) - 4);
59 if (ntohl(*pchecksum) == 0) 60 if (ntohl(*pchecksum) == 0)
60 return 0; 61 return 0;
61 62
@@ -63,7 +64,9 @@ static int ncsi_validate_rsp_pkt(struct ncsi_request *nr,
63 sizeof(*h) + payload - 4); 64 sizeof(*h) + payload - 4);
64 65
65 if (*pchecksum != htonl(checksum)) { 66 if (*pchecksum != htonl(checksum)) {
66 netdev_dbg(nr->ndp->ndev.dev, "NCSI: checksum mismatched\n"); 67 netdev_dbg(nr->ndp->ndev.dev,
68 "NCSI: checksum mismatched; recd: %08x calc: %08x\n",
69 *pchecksum, htonl(checksum));
67 return -EINVAL; 70 return -EINVAL;
68 } 71 }
69 72
diff --git a/net/netfilter/Kconfig b/net/netfilter/Kconfig
index 32a45c03786e..0d65f4d39494 100644
--- a/net/netfilter/Kconfig
+++ b/net/netfilter/Kconfig
@@ -223,8 +223,6 @@ config NF_CONNTRACK_FTP
223 of Network Address Translation on them. 223 of Network Address Translation on them.
224 224
225 This is FTP support on Layer 3 independent connection tracking. 225 This is FTP support on Layer 3 independent connection tracking.
226 Layer 3 independent connection tracking is experimental scheme
227 which generalize ip_conntrack to support other layer 3 protocols.
228 226
229 To compile it as a module, choose M here. If unsure, say N. 227 To compile it as a module, choose M here. If unsure, say N.
230 228
@@ -338,7 +336,7 @@ config NF_CONNTRACK_SIP
338 help 336 help
339 SIP is an application-layer control protocol that can establish, 337 SIP is an application-layer control protocol that can establish,
340 modify, and terminate multimedia sessions (conferences) such as 338 modify, and terminate multimedia sessions (conferences) such as
341 Internet telephony calls. With the ip_conntrack_sip and 339 Internet telephony calls. With the nf_conntrack_sip and
342 the nf_nat_sip modules you can support the protocol on a connection 340 the nf_nat_sip modules you can support the protocol on a connection
343 tracking/NATing firewall. 341 tracking/NATing firewall.
344 342
@@ -1313,7 +1311,7 @@ config NETFILTER_XT_MATCH_HELPER
1313 depends on NETFILTER_ADVANCED 1311 depends on NETFILTER_ADVANCED
1314 help 1312 help
1315 Helper matching allows you to match packets in dynamic connections 1313 Helper matching allows you to match packets in dynamic connections
1316 tracked by a conntrack-helper, ie. ip_conntrack_ftp 1314 tracked by a conntrack-helper, ie. nf_conntrack_ftp
1317 1315
1318 To compile it as a module, choose M here. If unsure, say Y. 1316 To compile it as a module, choose M here. If unsure, say Y.
1319 1317
diff --git a/net/netfilter/ipset/ip_set_bitmap_ipmac.c b/net/netfilter/ipset/ip_set_bitmap_ipmac.c
index ca7ac4a25ada..1d4e63326e68 100644
--- a/net/netfilter/ipset/ip_set_bitmap_ipmac.c
+++ b/net/netfilter/ipset/ip_set_bitmap_ipmac.c
@@ -226,7 +226,7 @@ bitmap_ipmac_kadt(struct ip_set *set, const struct sk_buff *skb,
226 226
227 e.id = ip_to_id(map, ip); 227 e.id = ip_to_id(map, ip);
228 228
229 if (opt->flags & IPSET_DIM_ONE_SRC) 229 if (opt->flags & IPSET_DIM_TWO_SRC)
230 ether_addr_copy(e.ether, eth_hdr(skb)->h_source); 230 ether_addr_copy(e.ether, eth_hdr(skb)->h_source);
231 else 231 else
232 ether_addr_copy(e.ether, eth_hdr(skb)->h_dest); 232 ether_addr_copy(e.ether, eth_hdr(skb)->h_dest);
diff --git a/net/netfilter/ipset/ip_set_core.c b/net/netfilter/ipset/ip_set_core.c
index 2e151856ad99..e64d5f9a89dd 100644
--- a/net/netfilter/ipset/ip_set_core.c
+++ b/net/netfilter/ipset/ip_set_core.c
@@ -1161,7 +1161,7 @@ static int ip_set_rename(struct net *net, struct sock *ctnl,
1161 return -ENOENT; 1161 return -ENOENT;
1162 1162
1163 write_lock_bh(&ip_set_ref_lock); 1163 write_lock_bh(&ip_set_ref_lock);
1164 if (set->ref != 0) { 1164 if (set->ref != 0 || set->ref_netlink != 0) {
1165 ret = -IPSET_ERR_REFERENCED; 1165 ret = -IPSET_ERR_REFERENCED;
1166 goto out; 1166 goto out;
1167 } 1167 }
diff --git a/net/netfilter/ipset/ip_set_hash_ipmac.c b/net/netfilter/ipset/ip_set_hash_ipmac.c
index faf59b6a998f..24d8f4df4230 100644
--- a/net/netfilter/ipset/ip_set_hash_ipmac.c
+++ b/net/netfilter/ipset/ip_set_hash_ipmac.c
@@ -89,15 +89,11 @@ hash_ipmac4_kadt(struct ip_set *set, const struct sk_buff *skb,
89 struct hash_ipmac4_elem e = { .ip = 0, { .foo[0] = 0, .foo[1] = 0 } }; 89 struct hash_ipmac4_elem e = { .ip = 0, { .foo[0] = 0, .foo[1] = 0 } };
90 struct ip_set_ext ext = IP_SET_INIT_KEXT(skb, opt, set); 90 struct ip_set_ext ext = IP_SET_INIT_KEXT(skb, opt, set);
91 91
92 /* MAC can be src only */
93 if (!(opt->flags & IPSET_DIM_TWO_SRC))
94 return 0;
95
96 if (skb_mac_header(skb) < skb->head || 92 if (skb_mac_header(skb) < skb->head ||
97 (skb_mac_header(skb) + ETH_HLEN) > skb->data) 93 (skb_mac_header(skb) + ETH_HLEN) > skb->data)
98 return -EINVAL; 94 return -EINVAL;
99 95
100 if (opt->flags & IPSET_DIM_ONE_SRC) 96 if (opt->flags & IPSET_DIM_TWO_SRC)
101 ether_addr_copy(e.ether, eth_hdr(skb)->h_source); 97 ether_addr_copy(e.ether, eth_hdr(skb)->h_source);
102 else 98 else
103 ether_addr_copy(e.ether, eth_hdr(skb)->h_dest); 99 ether_addr_copy(e.ether, eth_hdr(skb)->h_dest);
diff --git a/net/netfilter/ipvs/ip_vs_nfct.c b/net/netfilter/ipvs/ip_vs_nfct.c
index 403541996952..08adcb222986 100644
--- a/net/netfilter/ipvs/ip_vs_nfct.c
+++ b/net/netfilter/ipvs/ip_vs_nfct.c
@@ -231,7 +231,7 @@ void ip_vs_nfct_expect_related(struct sk_buff *skb, struct nf_conn *ct,
231 231
232 IP_VS_DBG_BUF(7, "%s: ct=%p, expect tuple=" FMT_TUPLE "\n", 232 IP_VS_DBG_BUF(7, "%s: ct=%p, expect tuple=" FMT_TUPLE "\n",
233 __func__, ct, ARG_TUPLE(&exp->tuple)); 233 __func__, ct, ARG_TUPLE(&exp->tuple));
234 nf_ct_expect_related(exp); 234 nf_ct_expect_related(exp, 0);
235 nf_ct_expect_put(exp); 235 nf_ct_expect_put(exp);
236} 236}
237EXPORT_SYMBOL(ip_vs_nfct_expect_related); 237EXPORT_SYMBOL(ip_vs_nfct_expect_related);
diff --git a/net/netfilter/nf_conntrack_amanda.c b/net/netfilter/nf_conntrack_amanda.c
index 42ee659d0d1e..d011d2eb0848 100644
--- a/net/netfilter/nf_conntrack_amanda.c
+++ b/net/netfilter/nf_conntrack_amanda.c
@@ -159,7 +159,7 @@ static int amanda_help(struct sk_buff *skb,
159 if (nf_nat_amanda && ct->status & IPS_NAT_MASK) 159 if (nf_nat_amanda && ct->status & IPS_NAT_MASK)
160 ret = nf_nat_amanda(skb, ctinfo, protoff, 160 ret = nf_nat_amanda(skb, ctinfo, protoff,
161 off - dataoff, len, exp); 161 off - dataoff, len, exp);
162 else if (nf_ct_expect_related(exp) != 0) { 162 else if (nf_ct_expect_related(exp, 0) != 0) {
163 nf_ct_helper_log(skb, ct, "cannot add expectation"); 163 nf_ct_helper_log(skb, ct, "cannot add expectation");
164 ret = NF_DROP; 164 ret = NF_DROP;
165 } 165 }
diff --git a/net/netfilter/nf_conntrack_broadcast.c b/net/netfilter/nf_conntrack_broadcast.c
index 921a7b95be68..1ba6becc3079 100644
--- a/net/netfilter/nf_conntrack_broadcast.c
+++ b/net/netfilter/nf_conntrack_broadcast.c
@@ -68,7 +68,7 @@ int nf_conntrack_broadcast_help(struct sk_buff *skb,
68 exp->class = NF_CT_EXPECT_CLASS_DEFAULT; 68 exp->class = NF_CT_EXPECT_CLASS_DEFAULT;
69 exp->helper = NULL; 69 exp->helper = NULL;
70 70
71 nf_ct_expect_related(exp); 71 nf_ct_expect_related(exp, 0);
72 nf_ct_expect_put(exp); 72 nf_ct_expect_put(exp);
73 73
74 nf_ct_refresh(ct, skb, timeout * HZ); 74 nf_ct_refresh(ct, skb, timeout * HZ);
diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
index bdfeacee0817..81a8ef42b88d 100644
--- a/net/netfilter/nf_conntrack_core.c
+++ b/net/netfilter/nf_conntrack_core.c
@@ -453,13 +453,12 @@ EXPORT_SYMBOL_GPL(nf_ct_invert_tuple);
453 * table location, we assume id gets exposed to userspace. 453 * table location, we assume id gets exposed to userspace.
454 * 454 *
455 * Following nf_conn items do not change throughout lifetime 455 * Following nf_conn items do not change throughout lifetime
456 * of the nf_conn after it has been committed to main hash table: 456 * of the nf_conn:
457 * 457 *
458 * 1. nf_conn address 458 * 1. nf_conn address
459 * 2. nf_conn->ext address 459 * 2. nf_conn->master address (normally NULL)
460 * 3. nf_conn->master address (normally NULL) 460 * 3. the associated net namespace
461 * 4. tuple 461 * 4. the original direction tuple
462 * 5. the associated net namespace
463 */ 462 */
464u32 nf_ct_get_id(const struct nf_conn *ct) 463u32 nf_ct_get_id(const struct nf_conn *ct)
465{ 464{
@@ -469,9 +468,10 @@ u32 nf_ct_get_id(const struct nf_conn *ct)
469 net_get_random_once(&ct_id_seed, sizeof(ct_id_seed)); 468 net_get_random_once(&ct_id_seed, sizeof(ct_id_seed));
470 469
471 a = (unsigned long)ct; 470 a = (unsigned long)ct;
472 b = (unsigned long)ct->master ^ net_hash_mix(nf_ct_net(ct)); 471 b = (unsigned long)ct->master;
473 c = (unsigned long)ct->ext; 472 c = (unsigned long)nf_ct_net(ct);
474 d = (unsigned long)siphash(&ct->tuplehash, sizeof(ct->tuplehash), 473 d = (unsigned long)siphash(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple,
474 sizeof(ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple),
475 &ct_id_seed); 475 &ct_id_seed);
476#ifdef CONFIG_64BIT 476#ifdef CONFIG_64BIT
477 return siphash_4u64((u64)a, (u64)b, (u64)c, (u64)d, &ct_id_seed); 477 return siphash_4u64((u64)a, (u64)b, (u64)c, (u64)d, &ct_id_seed);
@@ -1817,9 +1817,7 @@ EXPORT_SYMBOL_GPL(nf_ct_kill_acct);
1817#include <linux/netfilter/nfnetlink_conntrack.h> 1817#include <linux/netfilter/nfnetlink_conntrack.h>
1818#include <linux/mutex.h> 1818#include <linux/mutex.h>
1819 1819
1820/* Generic function for tcp/udp/sctp/dccp and alike. This needs to be 1820/* Generic function for tcp/udp/sctp/dccp and alike. */
1821 * in ip_conntrack_core, since we don't want the protocols to autoload
1822 * or depend on ctnetlink */
1823int nf_ct_port_tuple_to_nlattr(struct sk_buff *skb, 1821int nf_ct_port_tuple_to_nlattr(struct sk_buff *skb,
1824 const struct nf_conntrack_tuple *tuple) 1822 const struct nf_conntrack_tuple *tuple)
1825{ 1823{
diff --git a/net/netfilter/nf_conntrack_expect.c b/net/netfilter/nf_conntrack_expect.c
index ffd1f4906c4f..65364de915d1 100644
--- a/net/netfilter/nf_conntrack_expect.c
+++ b/net/netfilter/nf_conntrack_expect.c
@@ -249,13 +249,22 @@ static inline int expect_clash(const struct nf_conntrack_expect *a,
249static inline int expect_matches(const struct nf_conntrack_expect *a, 249static inline int expect_matches(const struct nf_conntrack_expect *a,
250 const struct nf_conntrack_expect *b) 250 const struct nf_conntrack_expect *b)
251{ 251{
252 return a->master == b->master && 252 return nf_ct_tuple_equal(&a->tuple, &b->tuple) &&
253 nf_ct_tuple_equal(&a->tuple, &b->tuple) &&
254 nf_ct_tuple_mask_equal(&a->mask, &b->mask) && 253 nf_ct_tuple_mask_equal(&a->mask, &b->mask) &&
255 net_eq(nf_ct_net(a->master), nf_ct_net(b->master)) && 254 net_eq(nf_ct_net(a->master), nf_ct_net(b->master)) &&
256 nf_ct_zone_equal_any(a->master, nf_ct_zone(b->master)); 255 nf_ct_zone_equal_any(a->master, nf_ct_zone(b->master));
257} 256}
258 257
258static bool master_matches(const struct nf_conntrack_expect *a,
259 const struct nf_conntrack_expect *b,
260 unsigned int flags)
261{
262 if (flags & NF_CT_EXP_F_SKIP_MASTER)
263 return true;
264
265 return a->master == b->master;
266}
267
259/* Generally a bad idea to call this: could have matched already. */ 268/* Generally a bad idea to call this: could have matched already. */
260void nf_ct_unexpect_related(struct nf_conntrack_expect *exp) 269void nf_ct_unexpect_related(struct nf_conntrack_expect *exp)
261{ 270{
@@ -399,7 +408,8 @@ static void evict_oldest_expect(struct nf_conn *master,
399 nf_ct_remove_expect(last); 408 nf_ct_remove_expect(last);
400} 409}
401 410
402static inline int __nf_ct_expect_check(struct nf_conntrack_expect *expect) 411static inline int __nf_ct_expect_check(struct nf_conntrack_expect *expect,
412 unsigned int flags)
403{ 413{
404 const struct nf_conntrack_expect_policy *p; 414 const struct nf_conntrack_expect_policy *p;
405 struct nf_conntrack_expect *i; 415 struct nf_conntrack_expect *i;
@@ -417,8 +427,10 @@ static inline int __nf_ct_expect_check(struct nf_conntrack_expect *expect)
417 } 427 }
418 h = nf_ct_expect_dst_hash(net, &expect->tuple); 428 h = nf_ct_expect_dst_hash(net, &expect->tuple);
419 hlist_for_each_entry_safe(i, next, &nf_ct_expect_hash[h], hnode) { 429 hlist_for_each_entry_safe(i, next, &nf_ct_expect_hash[h], hnode) {
420 if (expect_matches(i, expect)) { 430 if (master_matches(i, expect, flags) &&
421 if (i->class != expect->class) 431 expect_matches(i, expect)) {
432 if (i->class != expect->class ||
433 i->master != expect->master)
422 return -EALREADY; 434 return -EALREADY;
423 435
424 if (nf_ct_remove_expect(i)) 436 if (nf_ct_remove_expect(i))
@@ -453,12 +465,12 @@ out:
453} 465}
454 466
455int nf_ct_expect_related_report(struct nf_conntrack_expect *expect, 467int nf_ct_expect_related_report(struct nf_conntrack_expect *expect,
456 u32 portid, int report) 468 u32 portid, int report, unsigned int flags)
457{ 469{
458 int ret; 470 int ret;
459 471
460 spin_lock_bh(&nf_conntrack_expect_lock); 472 spin_lock_bh(&nf_conntrack_expect_lock);
461 ret = __nf_ct_expect_check(expect); 473 ret = __nf_ct_expect_check(expect, flags);
462 if (ret < 0) 474 if (ret < 0)
463 goto out; 475 goto out;
464 476
diff --git a/net/netfilter/nf_conntrack_ftp.c b/net/netfilter/nf_conntrack_ftp.c
index 8c6c11bab5b6..8d96738b7dfd 100644
--- a/net/netfilter/nf_conntrack_ftp.c
+++ b/net/netfilter/nf_conntrack_ftp.c
@@ -322,7 +322,7 @@ static int find_pattern(const char *data, size_t dlen,
322 i++; 322 i++;
323 } 323 }
324 324
325 pr_debug("Skipped up to `%c'!\n", skip); 325 pr_debug("Skipped up to 0x%hhx delimiter!\n", skip);
326 326
327 *numoff = i; 327 *numoff = i;
328 *numlen = getnum(data + i, dlen - i, cmd, term, numoff); 328 *numlen = getnum(data + i, dlen - i, cmd, term, numoff);
@@ -525,7 +525,7 @@ skip_nl_seq:
525 protoff, matchoff, matchlen, exp); 525 protoff, matchoff, matchlen, exp);
526 else { 526 else {
527 /* Can't expect this? Best to drop packet now. */ 527 /* Can't expect this? Best to drop packet now. */
528 if (nf_ct_expect_related(exp) != 0) { 528 if (nf_ct_expect_related(exp, 0) != 0) {
529 nf_ct_helper_log(skb, ct, "cannot add expectation"); 529 nf_ct_helper_log(skb, ct, "cannot add expectation");
530 ret = NF_DROP; 530 ret = NF_DROP;
531 } else 531 } else
diff --git a/net/netfilter/nf_conntrack_h323_asn1.c b/net/netfilter/nf_conntrack_h323_asn1.c
index 8f6ba8162f0b..573cb4481481 100644
--- a/net/netfilter/nf_conntrack_h323_asn1.c
+++ b/net/netfilter/nf_conntrack_h323_asn1.c
@@ -1,11 +1,10 @@
1// SPDX-License-Identifier: GPL-2.0-only 1// SPDX-License-Identifier: GPL-2.0-only
2/* 2/*
3 * ip_conntrack_helper_h323_asn1.c - BER and PER decoding library for H.323 3 * BER and PER decoding library for H.323 conntrack/NAT module.
4 * conntrack/NAT module.
5 * 4 *
6 * Copyright (c) 2006 by Jing Min Zhao <zhaojingmin@users.sourceforge.net> 5 * Copyright (c) 2006 by Jing Min Zhao <zhaojingmin@users.sourceforge.net>
7 * 6 *
8 * See ip_conntrack_helper_h323_asn1.h for details. 7 * See nf_conntrack_helper_h323_asn1.h for details.
9 */ 8 */
10 9
11#ifdef __KERNEL__ 10#ifdef __KERNEL__
diff --git a/net/netfilter/nf_conntrack_h323_main.c b/net/netfilter/nf_conntrack_h323_main.c
index 6497e5fc0871..8ba037b76ad3 100644
--- a/net/netfilter/nf_conntrack_h323_main.c
+++ b/net/netfilter/nf_conntrack_h323_main.c
@@ -305,8 +305,8 @@ static int expect_rtp_rtcp(struct sk_buff *skb, struct nf_conn *ct,
305 ret = nat_rtp_rtcp(skb, ct, ctinfo, protoff, data, dataoff, 305 ret = nat_rtp_rtcp(skb, ct, ctinfo, protoff, data, dataoff,
306 taddr, port, rtp_port, rtp_exp, rtcp_exp); 306 taddr, port, rtp_port, rtp_exp, rtcp_exp);
307 } else { /* Conntrack only */ 307 } else { /* Conntrack only */
308 if (nf_ct_expect_related(rtp_exp) == 0) { 308 if (nf_ct_expect_related(rtp_exp, 0) == 0) {
309 if (nf_ct_expect_related(rtcp_exp) == 0) { 309 if (nf_ct_expect_related(rtcp_exp, 0) == 0) {
310 pr_debug("nf_ct_h323: expect RTP "); 310 pr_debug("nf_ct_h323: expect RTP ");
311 nf_ct_dump_tuple(&rtp_exp->tuple); 311 nf_ct_dump_tuple(&rtp_exp->tuple);
312 pr_debug("nf_ct_h323: expect RTCP "); 312 pr_debug("nf_ct_h323: expect RTCP ");
@@ -364,7 +364,7 @@ static int expect_t120(struct sk_buff *skb,
364 ret = nat_t120(skb, ct, ctinfo, protoff, data, dataoff, taddr, 364 ret = nat_t120(skb, ct, ctinfo, protoff, data, dataoff, taddr,
365 port, exp); 365 port, exp);
366 } else { /* Conntrack only */ 366 } else { /* Conntrack only */
367 if (nf_ct_expect_related(exp) == 0) { 367 if (nf_ct_expect_related(exp, 0) == 0) {
368 pr_debug("nf_ct_h323: expect T.120 "); 368 pr_debug("nf_ct_h323: expect T.120 ");
369 nf_ct_dump_tuple(&exp->tuple); 369 nf_ct_dump_tuple(&exp->tuple);
370 } else 370 } else
@@ -701,7 +701,7 @@ static int expect_h245(struct sk_buff *skb, struct nf_conn *ct,
701 ret = nat_h245(skb, ct, ctinfo, protoff, data, dataoff, taddr, 701 ret = nat_h245(skb, ct, ctinfo, protoff, data, dataoff, taddr,
702 port, exp); 702 port, exp);
703 } else { /* Conntrack only */ 703 } else { /* Conntrack only */
704 if (nf_ct_expect_related(exp) == 0) { 704 if (nf_ct_expect_related(exp, 0) == 0) {
705 pr_debug("nf_ct_q931: expect H.245 "); 705 pr_debug("nf_ct_q931: expect H.245 ");
706 nf_ct_dump_tuple(&exp->tuple); 706 nf_ct_dump_tuple(&exp->tuple);
707 } else 707 } else
@@ -825,7 +825,7 @@ static int expect_callforwarding(struct sk_buff *skb,
825 protoff, data, dataoff, 825 protoff, data, dataoff,
826 taddr, port, exp); 826 taddr, port, exp);
827 } else { /* Conntrack only */ 827 } else { /* Conntrack only */
828 if (nf_ct_expect_related(exp) == 0) { 828 if (nf_ct_expect_related(exp, 0) == 0) {
829 pr_debug("nf_ct_q931: expect Call Forwarding "); 829 pr_debug("nf_ct_q931: expect Call Forwarding ");
830 nf_ct_dump_tuple(&exp->tuple); 830 nf_ct_dump_tuple(&exp->tuple);
831 } else 831 } else
@@ -1284,7 +1284,7 @@ static int expect_q931(struct sk_buff *skb, struct nf_conn *ct,
1284 ret = nat_q931(skb, ct, ctinfo, protoff, data, 1284 ret = nat_q931(skb, ct, ctinfo, protoff, data,
1285 taddr, i, port, exp); 1285 taddr, i, port, exp);
1286 } else { /* Conntrack only */ 1286 } else { /* Conntrack only */
1287 if (nf_ct_expect_related(exp) == 0) { 1287 if (nf_ct_expect_related(exp, 0) == 0) {
1288 pr_debug("nf_ct_ras: expect Q.931 "); 1288 pr_debug("nf_ct_ras: expect Q.931 ");
1289 nf_ct_dump_tuple(&exp->tuple); 1289 nf_ct_dump_tuple(&exp->tuple);
1290 1290
@@ -1349,7 +1349,7 @@ static int process_gcf(struct sk_buff *skb, struct nf_conn *ct,
1349 IPPROTO_UDP, NULL, &port); 1349 IPPROTO_UDP, NULL, &port);
1350 exp->helper = nf_conntrack_helper_ras; 1350 exp->helper = nf_conntrack_helper_ras;
1351 1351
1352 if (nf_ct_expect_related(exp) == 0) { 1352 if (nf_ct_expect_related(exp, 0) == 0) {
1353 pr_debug("nf_ct_ras: expect RAS "); 1353 pr_debug("nf_ct_ras: expect RAS ");
1354 nf_ct_dump_tuple(&exp->tuple); 1354 nf_ct_dump_tuple(&exp->tuple);
1355 } else 1355 } else
@@ -1561,7 +1561,7 @@ static int process_acf(struct sk_buff *skb, struct nf_conn *ct,
1561 exp->flags = NF_CT_EXPECT_PERMANENT; 1561 exp->flags = NF_CT_EXPECT_PERMANENT;
1562 exp->helper = nf_conntrack_helper_q931; 1562 exp->helper = nf_conntrack_helper_q931;
1563 1563
1564 if (nf_ct_expect_related(exp) == 0) { 1564 if (nf_ct_expect_related(exp, 0) == 0) {
1565 pr_debug("nf_ct_ras: expect Q.931 "); 1565 pr_debug("nf_ct_ras: expect Q.931 ");
1566 nf_ct_dump_tuple(&exp->tuple); 1566 nf_ct_dump_tuple(&exp->tuple);
1567 } else 1567 } else
@@ -1615,7 +1615,7 @@ static int process_lcf(struct sk_buff *skb, struct nf_conn *ct,
1615 exp->flags = NF_CT_EXPECT_PERMANENT; 1615 exp->flags = NF_CT_EXPECT_PERMANENT;
1616 exp->helper = nf_conntrack_helper_q931; 1616 exp->helper = nf_conntrack_helper_q931;
1617 1617
1618 if (nf_ct_expect_related(exp) == 0) { 1618 if (nf_ct_expect_related(exp, 0) == 0) {
1619 pr_debug("nf_ct_ras: expect Q.931 "); 1619 pr_debug("nf_ct_ras: expect Q.931 ");
1620 nf_ct_dump_tuple(&exp->tuple); 1620 nf_ct_dump_tuple(&exp->tuple);
1621 } else 1621 } else
diff --git a/net/netfilter/nf_conntrack_irc.c b/net/netfilter/nf_conntrack_irc.c
index 7ac156f1f3bc..e40988a2f22f 100644
--- a/net/netfilter/nf_conntrack_irc.c
+++ b/net/netfilter/nf_conntrack_irc.c
@@ -213,7 +213,7 @@ static int help(struct sk_buff *skb, unsigned int protoff,
213 addr_beg_p - ib_ptr, 213 addr_beg_p - ib_ptr,
214 addr_end_p - addr_beg_p, 214 addr_end_p - addr_beg_p,
215 exp); 215 exp);
216 else if (nf_ct_expect_related(exp) != 0) { 216 else if (nf_ct_expect_related(exp, 0) != 0) {
217 nf_ct_helper_log(skb, ct, 217 nf_ct_helper_log(skb, ct,
218 "cannot add expectation"); 218 "cannot add expectation");
219 ret = NF_DROP; 219 ret = NF_DROP;
diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c
index 1b77444d5b52..6aa01eb6fe99 100644
--- a/net/netfilter/nf_conntrack_netlink.c
+++ b/net/netfilter/nf_conntrack_netlink.c
@@ -2616,7 +2616,7 @@ ctnetlink_glue_attach_expect(const struct nlattr *attr, struct nf_conn *ct,
2616 if (IS_ERR(exp)) 2616 if (IS_ERR(exp))
2617 return PTR_ERR(exp); 2617 return PTR_ERR(exp);
2618 2618
2619 err = nf_ct_expect_related_report(exp, portid, report); 2619 err = nf_ct_expect_related_report(exp, portid, report, 0);
2620 nf_ct_expect_put(exp); 2620 nf_ct_expect_put(exp);
2621 return err; 2621 return err;
2622} 2622}
@@ -3367,7 +3367,7 @@ ctnetlink_create_expect(struct net *net,
3367 goto err_rcu; 3367 goto err_rcu;
3368 } 3368 }
3369 3369
3370 err = nf_ct_expect_related_report(exp, portid, report); 3370 err = nf_ct_expect_related_report(exp, portid, report, 0);
3371 nf_ct_expect_put(exp); 3371 nf_ct_expect_put(exp);
3372err_rcu: 3372err_rcu:
3373 rcu_read_unlock(); 3373 rcu_read_unlock();
diff --git a/net/netfilter/nf_conntrack_pptp.c b/net/netfilter/nf_conntrack_pptp.c
index b22042ad0fca..a971183f11af 100644
--- a/net/netfilter/nf_conntrack_pptp.c
+++ b/net/netfilter/nf_conntrack_pptp.c
@@ -234,9 +234,9 @@ static int exp_gre(struct nf_conn *ct, __be16 callid, __be16 peer_callid)
234 nf_nat_pptp_exp_gre = rcu_dereference(nf_nat_pptp_hook_exp_gre); 234 nf_nat_pptp_exp_gre = rcu_dereference(nf_nat_pptp_hook_exp_gre);
235 if (nf_nat_pptp_exp_gre && ct->status & IPS_NAT_MASK) 235 if (nf_nat_pptp_exp_gre && ct->status & IPS_NAT_MASK)
236 nf_nat_pptp_exp_gre(exp_orig, exp_reply); 236 nf_nat_pptp_exp_gre(exp_orig, exp_reply);
237 if (nf_ct_expect_related(exp_orig) != 0) 237 if (nf_ct_expect_related(exp_orig, 0) != 0)
238 goto out_put_both; 238 goto out_put_both;
239 if (nf_ct_expect_related(exp_reply) != 0) 239 if (nf_ct_expect_related(exp_reply, 0) != 0)
240 goto out_unexpect_orig; 240 goto out_unexpect_orig;
241 241
242 /* Add GRE keymap entries */ 242 /* Add GRE keymap entries */
diff --git a/net/netfilter/nf_conntrack_proto_gre.c b/net/netfilter/nf_conntrack_proto_gre.c
index c2eb365f1723..5b05487a60d2 100644
--- a/net/netfilter/nf_conntrack_proto_gre.c
+++ b/net/netfilter/nf_conntrack_proto_gre.c
@@ -1,7 +1,5 @@
1// SPDX-License-Identifier: GPL-2.0-only 1// SPDX-License-Identifier: GPL-2.0-only
2/* 2/*
3 * ip_conntrack_proto_gre.c - Version 3.0
4 *
5 * Connection tracking protocol helper module for GRE. 3 * Connection tracking protocol helper module for GRE.
6 * 4 *
7 * GRE is a generic encapsulation protocol, which is generally not very 5 * GRE is a generic encapsulation protocol, which is generally not very
diff --git a/net/netfilter/nf_conntrack_proto_icmp.c b/net/netfilter/nf_conntrack_proto_icmp.c
index dd53e2b20f6b..097deba7441a 100644
--- a/net/netfilter/nf_conntrack_proto_icmp.c
+++ b/net/netfilter/nf_conntrack_proto_icmp.c
@@ -215,7 +215,7 @@ int nf_conntrack_icmpv4_error(struct nf_conn *tmpl,
215 return -NF_ACCEPT; 215 return -NF_ACCEPT;
216 } 216 }
217 217
218 /* See ip_conntrack_proto_tcp.c */ 218 /* See nf_conntrack_proto_tcp.c */
219 if (state->net->ct.sysctl_checksum && 219 if (state->net->ct.sysctl_checksum &&
220 state->hook == NF_INET_PRE_ROUTING && 220 state->hook == NF_INET_PRE_ROUTING &&
221 nf_ip_checksum(skb, state->hook, dataoff, IPPROTO_ICMP)) { 221 nf_ip_checksum(skb, state->hook, dataoff, IPPROTO_ICMP)) {
diff --git a/net/netfilter/nf_conntrack_proto_tcp.c b/net/netfilter/nf_conntrack_proto_tcp.c
index d5fdfa00d683..85c1f8c213b0 100644
--- a/net/netfilter/nf_conntrack_proto_tcp.c
+++ b/net/netfilter/nf_conntrack_proto_tcp.c
@@ -472,6 +472,7 @@ static bool tcp_in_window(const struct nf_conn *ct,
472 struct ip_ct_tcp_state *receiver = &state->seen[!dir]; 472 struct ip_ct_tcp_state *receiver = &state->seen[!dir];
473 const struct nf_conntrack_tuple *tuple = &ct->tuplehash[dir].tuple; 473 const struct nf_conntrack_tuple *tuple = &ct->tuplehash[dir].tuple;
474 __u32 seq, ack, sack, end, win, swin; 474 __u32 seq, ack, sack, end, win, swin;
475 u16 win_raw;
475 s32 receiver_offset; 476 s32 receiver_offset;
476 bool res, in_recv_win; 477 bool res, in_recv_win;
477 478
@@ -480,7 +481,8 @@ static bool tcp_in_window(const struct nf_conn *ct,
480 */ 481 */
481 seq = ntohl(tcph->seq); 482 seq = ntohl(tcph->seq);
482 ack = sack = ntohl(tcph->ack_seq); 483 ack = sack = ntohl(tcph->ack_seq);
483 win = ntohs(tcph->window); 484 win_raw = ntohs(tcph->window);
485 win = win_raw;
484 end = segment_seq_plus_len(seq, skb->len, dataoff, tcph); 486 end = segment_seq_plus_len(seq, skb->len, dataoff, tcph);
485 487
486 if (receiver->flags & IP_CT_TCP_FLAG_SACK_PERM) 488 if (receiver->flags & IP_CT_TCP_FLAG_SACK_PERM)
@@ -655,14 +657,14 @@ static bool tcp_in_window(const struct nf_conn *ct,
655 && state->last_seq == seq 657 && state->last_seq == seq
656 && state->last_ack == ack 658 && state->last_ack == ack
657 && state->last_end == end 659 && state->last_end == end
658 && state->last_win == win) 660 && state->last_win == win_raw)
659 state->retrans++; 661 state->retrans++;
660 else { 662 else {
661 state->last_dir = dir; 663 state->last_dir = dir;
662 state->last_seq = seq; 664 state->last_seq = seq;
663 state->last_ack = ack; 665 state->last_ack = ack;
664 state->last_end = end; 666 state->last_end = end;
665 state->last_win = win; 667 state->last_win = win_raw;
666 state->retrans = 0; 668 state->retrans = 0;
667 } 669 }
668 } 670 }
diff --git a/net/netfilter/nf_conntrack_sane.c b/net/netfilter/nf_conntrack_sane.c
index 81448c3db661..1aebd6569d4e 100644
--- a/net/netfilter/nf_conntrack_sane.c
+++ b/net/netfilter/nf_conntrack_sane.c
@@ -153,7 +153,7 @@ static int help(struct sk_buff *skb,
153 nf_ct_dump_tuple(&exp->tuple); 153 nf_ct_dump_tuple(&exp->tuple);
154 154
155 /* Can't expect this? Best to drop packet now. */ 155 /* Can't expect this? Best to drop packet now. */
156 if (nf_ct_expect_related(exp) != 0) { 156 if (nf_ct_expect_related(exp, 0) != 0) {
157 nf_ct_helper_log(skb, ct, "cannot add expectation"); 157 nf_ct_helper_log(skb, ct, "cannot add expectation");
158 ret = NF_DROP; 158 ret = NF_DROP;
159 } 159 }
diff --git a/net/netfilter/nf_conntrack_sip.c b/net/netfilter/nf_conntrack_sip.c
index 107251731809..b83dc9bf0a5d 100644
--- a/net/netfilter/nf_conntrack_sip.c
+++ b/net/netfilter/nf_conntrack_sip.c
@@ -977,11 +977,15 @@ static int set_expected_rtp_rtcp(struct sk_buff *skb, unsigned int protoff,
977 /* -EALREADY handling works around end-points that send 977 /* -EALREADY handling works around end-points that send
978 * SDP messages with identical port but different media type, 978 * SDP messages with identical port but different media type,
979 * we pretend expectation was set up. 979 * we pretend expectation was set up.
980 * It also works in the case that SDP messages are sent with
981 * identical expect tuples but for different master conntracks.
980 */ 982 */
981 int errp = nf_ct_expect_related(rtp_exp); 983 int errp = nf_ct_expect_related(rtp_exp,
984 NF_CT_EXP_F_SKIP_MASTER);
982 985
983 if (errp == 0 || errp == -EALREADY) { 986 if (errp == 0 || errp == -EALREADY) {
984 int errcp = nf_ct_expect_related(rtcp_exp); 987 int errcp = nf_ct_expect_related(rtcp_exp,
988 NF_CT_EXP_F_SKIP_MASTER);
985 989
986 if (errcp == 0 || errcp == -EALREADY) 990 if (errcp == 0 || errcp == -EALREADY)
987 ret = NF_ACCEPT; 991 ret = NF_ACCEPT;
@@ -1296,7 +1300,7 @@ static int process_register_request(struct sk_buff *skb, unsigned int protoff,
1296 ret = hooks->expect(skb, protoff, dataoff, dptr, datalen, 1300 ret = hooks->expect(skb, protoff, dataoff, dptr, datalen,
1297 exp, matchoff, matchlen); 1301 exp, matchoff, matchlen);
1298 else { 1302 else {
1299 if (nf_ct_expect_related(exp) != 0) { 1303 if (nf_ct_expect_related(exp, 0) != 0) {
1300 nf_ct_helper_log(skb, ct, "cannot add expectation"); 1304 nf_ct_helper_log(skb, ct, "cannot add expectation");
1301 ret = NF_DROP; 1305 ret = NF_DROP;
1302 } else 1306 } else
diff --git a/net/netfilter/nf_conntrack_standalone.c b/net/netfilter/nf_conntrack_standalone.c
index e0d392cb3075..0006503d2da9 100644
--- a/net/netfilter/nf_conntrack_standalone.c
+++ b/net/netfilter/nf_conntrack_standalone.c
@@ -1037,9 +1037,14 @@ static int nf_conntrack_standalone_init_sysctl(struct net *net)
1037 table[NF_SYSCTL_CT_COUNT].data = &net->ct.count; 1037 table[NF_SYSCTL_CT_COUNT].data = &net->ct.count;
1038 table[NF_SYSCTL_CT_CHECKSUM].data = &net->ct.sysctl_checksum; 1038 table[NF_SYSCTL_CT_CHECKSUM].data = &net->ct.sysctl_checksum;
1039 table[NF_SYSCTL_CT_LOG_INVALID].data = &net->ct.sysctl_log_invalid; 1039 table[NF_SYSCTL_CT_LOG_INVALID].data = &net->ct.sysctl_log_invalid;
1040 table[NF_SYSCTL_CT_ACCT].data = &net->ct.sysctl_acct;
1041 table[NF_SYSCTL_CT_HELPER].data = &net->ct.sysctl_auto_assign_helper;
1040#ifdef CONFIG_NF_CONNTRACK_EVENTS 1042#ifdef CONFIG_NF_CONNTRACK_EVENTS
1041 table[NF_SYSCTL_CT_EVENTS].data = &net->ct.sysctl_events; 1043 table[NF_SYSCTL_CT_EVENTS].data = &net->ct.sysctl_events;
1042#endif 1044#endif
1045#ifdef CONFIG_NF_CONNTRACK_TIMESTAMP
1046 table[NF_SYSCTL_CT_TIMESTAMP].data = &net->ct.sysctl_tstamp;
1047#endif
1043 table[NF_SYSCTL_CT_PROTO_TIMEOUT_GENERIC].data = &nf_generic_pernet(net)->timeout; 1048 table[NF_SYSCTL_CT_PROTO_TIMEOUT_GENERIC].data = &nf_generic_pernet(net)->timeout;
1044 table[NF_SYSCTL_CT_PROTO_TIMEOUT_ICMP].data = &nf_icmp_pernet(net)->timeout; 1049 table[NF_SYSCTL_CT_PROTO_TIMEOUT_ICMP].data = &nf_icmp_pernet(net)->timeout;
1045 table[NF_SYSCTL_CT_PROTO_TIMEOUT_ICMPV6].data = &nf_icmpv6_pernet(net)->timeout; 1050 table[NF_SYSCTL_CT_PROTO_TIMEOUT_ICMPV6].data = &nf_icmpv6_pernet(net)->timeout;
diff --git a/net/netfilter/nf_conntrack_tftp.c b/net/netfilter/nf_conntrack_tftp.c
index df6d6d61bd58..80ee53f29f68 100644
--- a/net/netfilter/nf_conntrack_tftp.c
+++ b/net/netfilter/nf_conntrack_tftp.c
@@ -78,7 +78,7 @@ static int tftp_help(struct sk_buff *skb,
78 nf_nat_tftp = rcu_dereference(nf_nat_tftp_hook); 78 nf_nat_tftp = rcu_dereference(nf_nat_tftp_hook);
79 if (nf_nat_tftp && ct->status & IPS_NAT_MASK) 79 if (nf_nat_tftp && ct->status & IPS_NAT_MASK)
80 ret = nf_nat_tftp(skb, ctinfo, exp); 80 ret = nf_nat_tftp(skb, ctinfo, exp);
81 else if (nf_ct_expect_related(exp) != 0) { 81 else if (nf_ct_expect_related(exp, 0) != 0) {
82 nf_ct_helper_log(skb, ct, "cannot add expectation"); 82 nf_ct_helper_log(skb, ct, "cannot add expectation");
83 ret = NF_DROP; 83 ret = NF_DROP;
84 } 84 }
diff --git a/net/netfilter/nf_flow_table_core.c b/net/netfilter/nf_flow_table_core.c
index e3d797252a98..80a8f9ae4c93 100644
--- a/net/netfilter/nf_flow_table_core.c
+++ b/net/netfilter/nf_flow_table_core.c
@@ -111,15 +111,16 @@ static void flow_offload_fixup_tcp(struct ip_ct_tcp *tcp)
111#define NF_FLOWTABLE_TCP_PICKUP_TIMEOUT (120 * HZ) 111#define NF_FLOWTABLE_TCP_PICKUP_TIMEOUT (120 * HZ)
112#define NF_FLOWTABLE_UDP_PICKUP_TIMEOUT (30 * HZ) 112#define NF_FLOWTABLE_UDP_PICKUP_TIMEOUT (30 * HZ)
113 113
114static void flow_offload_fixup_ct_state(struct nf_conn *ct) 114static inline __s32 nf_flow_timeout_delta(unsigned int timeout)
115{
116 return (__s32)(timeout - (u32)jiffies);
117}
118
119static void flow_offload_fixup_ct_timeout(struct nf_conn *ct)
115{ 120{
116 const struct nf_conntrack_l4proto *l4proto; 121 const struct nf_conntrack_l4proto *l4proto;
122 int l4num = nf_ct_protonum(ct);
117 unsigned int timeout; 123 unsigned int timeout;
118 int l4num;
119
120 l4num = nf_ct_protonum(ct);
121 if (l4num == IPPROTO_TCP)
122 flow_offload_fixup_tcp(&ct->proto.tcp);
123 124
124 l4proto = nf_ct_l4proto_find(l4num); 125 l4proto = nf_ct_l4proto_find(l4num);
125 if (!l4proto) 126 if (!l4proto)
@@ -132,7 +133,20 @@ static void flow_offload_fixup_ct_state(struct nf_conn *ct)
132 else 133 else
133 return; 134 return;
134 135
135 ct->timeout = nfct_time_stamp + timeout; 136 if (nf_flow_timeout_delta(ct->timeout) > (__s32)timeout)
137 ct->timeout = nfct_time_stamp + timeout;
138}
139
140static void flow_offload_fixup_ct_state(struct nf_conn *ct)
141{
142 if (nf_ct_protonum(ct) == IPPROTO_TCP)
143 flow_offload_fixup_tcp(&ct->proto.tcp);
144}
145
146static void flow_offload_fixup_ct(struct nf_conn *ct)
147{
148 flow_offload_fixup_ct_state(ct);
149 flow_offload_fixup_ct_timeout(ct);
136} 150}
137 151
138void flow_offload_free(struct flow_offload *flow) 152void flow_offload_free(struct flow_offload *flow)
@@ -208,6 +222,11 @@ int flow_offload_add(struct nf_flowtable *flow_table, struct flow_offload *flow)
208} 222}
209EXPORT_SYMBOL_GPL(flow_offload_add); 223EXPORT_SYMBOL_GPL(flow_offload_add);
210 224
225static inline bool nf_flow_has_expired(const struct flow_offload *flow)
226{
227 return nf_flow_timeout_delta(flow->timeout) <= 0;
228}
229
211static void flow_offload_del(struct nf_flowtable *flow_table, 230static void flow_offload_del(struct nf_flowtable *flow_table,
212 struct flow_offload *flow) 231 struct flow_offload *flow)
213{ 232{
@@ -223,6 +242,11 @@ static void flow_offload_del(struct nf_flowtable *flow_table,
223 e = container_of(flow, struct flow_offload_entry, flow); 242 e = container_of(flow, struct flow_offload_entry, flow);
224 clear_bit(IPS_OFFLOAD_BIT, &e->ct->status); 243 clear_bit(IPS_OFFLOAD_BIT, &e->ct->status);
225 244
245 if (nf_flow_has_expired(flow))
246 flow_offload_fixup_ct(e->ct);
247 else if (flow->flags & FLOW_OFFLOAD_TEARDOWN)
248 flow_offload_fixup_ct_timeout(e->ct);
249
226 flow_offload_free(flow); 250 flow_offload_free(flow);
227} 251}
228 252
@@ -298,11 +322,6 @@ nf_flow_table_iterate(struct nf_flowtable *flow_table,
298 return err; 322 return err;
299} 323}
300 324
301static inline bool nf_flow_has_expired(const struct flow_offload *flow)
302{
303 return (__s32)(flow->timeout - (u32)jiffies) <= 0;
304}
305
306static void nf_flow_offload_gc_step(struct flow_offload *flow, void *data) 325static void nf_flow_offload_gc_step(struct flow_offload *flow, void *data)
307{ 326{
308 struct nf_flowtable *flow_table = data; 327 struct nf_flowtable *flow_table = data;
diff --git a/net/netfilter/nf_flow_table_ip.c b/net/netfilter/nf_flow_table_ip.c
index cdfc33517e85..b9e7dd6e60ce 100644
--- a/net/netfilter/nf_flow_table_ip.c
+++ b/net/netfilter/nf_flow_table_ip.c
@@ -214,6 +214,24 @@ static bool nf_flow_exceeds_mtu(const struct sk_buff *skb, unsigned int mtu)
214 return true; 214 return true;
215} 215}
216 216
217static int nf_flow_offload_dst_check(struct dst_entry *dst)
218{
219 if (unlikely(dst_xfrm(dst)))
220 return dst_check(dst, 0) ? 0 : -1;
221
222 return 0;
223}
224
225static unsigned int nf_flow_xmit_xfrm(struct sk_buff *skb,
226 const struct nf_hook_state *state,
227 struct dst_entry *dst)
228{
229 skb_orphan(skb);
230 skb_dst_set_noref(skb, dst);
231 dst_output(state->net, state->sk, skb);
232 return NF_STOLEN;
233}
234
217unsigned int 235unsigned int
218nf_flow_offload_ip_hook(void *priv, struct sk_buff *skb, 236nf_flow_offload_ip_hook(void *priv, struct sk_buff *skb,
219 const struct nf_hook_state *state) 237 const struct nf_hook_state *state)
@@ -254,12 +272,25 @@ nf_flow_offload_ip_hook(void *priv, struct sk_buff *skb,
254 if (nf_flow_state_check(flow, ip_hdr(skb)->protocol, skb, thoff)) 272 if (nf_flow_state_check(flow, ip_hdr(skb)->protocol, skb, thoff))
255 return NF_ACCEPT; 273 return NF_ACCEPT;
256 274
275 if (nf_flow_offload_dst_check(&rt->dst)) {
276 flow_offload_teardown(flow);
277 return NF_ACCEPT;
278 }
279
257 if (nf_flow_nat_ip(flow, skb, thoff, dir) < 0) 280 if (nf_flow_nat_ip(flow, skb, thoff, dir) < 0)
258 return NF_DROP; 281 return NF_DROP;
259 282
260 flow->timeout = (u32)jiffies + NF_FLOW_TIMEOUT; 283 flow->timeout = (u32)jiffies + NF_FLOW_TIMEOUT;
261 iph = ip_hdr(skb); 284 iph = ip_hdr(skb);
262 ip_decrease_ttl(iph); 285 ip_decrease_ttl(iph);
286 skb->tstamp = 0;
287
288 if (unlikely(dst_xfrm(&rt->dst))) {
289 memset(skb->cb, 0, sizeof(struct inet_skb_parm));
290 IPCB(skb)->iif = skb->dev->ifindex;
291 IPCB(skb)->flags = IPSKB_FORWARDED;
292 return nf_flow_xmit_xfrm(skb, state, &rt->dst);
293 }
263 294
264 skb->dev = outdev; 295 skb->dev = outdev;
265 nexthop = rt_nexthop(rt, flow->tuplehash[!dir].tuple.src_v4.s_addr); 296 nexthop = rt_nexthop(rt, flow->tuplehash[!dir].tuple.src_v4.s_addr);
@@ -467,6 +498,11 @@ nf_flow_offload_ipv6_hook(void *priv, struct sk_buff *skb,
467 sizeof(*ip6h))) 498 sizeof(*ip6h)))
468 return NF_ACCEPT; 499 return NF_ACCEPT;
469 500
501 if (nf_flow_offload_dst_check(&rt->dst)) {
502 flow_offload_teardown(flow);
503 return NF_ACCEPT;
504 }
505
470 if (skb_try_make_writable(skb, sizeof(*ip6h))) 506 if (skb_try_make_writable(skb, sizeof(*ip6h)))
471 return NF_DROP; 507 return NF_DROP;
472 508
@@ -476,6 +512,14 @@ nf_flow_offload_ipv6_hook(void *priv, struct sk_buff *skb,
476 flow->timeout = (u32)jiffies + NF_FLOW_TIMEOUT; 512 flow->timeout = (u32)jiffies + NF_FLOW_TIMEOUT;
477 ip6h = ipv6_hdr(skb); 513 ip6h = ipv6_hdr(skb);
478 ip6h->hop_limit--; 514 ip6h->hop_limit--;
515 skb->tstamp = 0;
516
517 if (unlikely(dst_xfrm(&rt->dst))) {
518 memset(skb->cb, 0, sizeof(struct inet6_skb_parm));
519 IP6CB(skb)->iif = skb->dev->ifindex;
520 IP6CB(skb)->flags = IP6SKB_FORWARDED;
521 return nf_flow_xmit_xfrm(skb, state, &rt->dst);
522 }
479 523
480 skb->dev = outdev; 524 skb->dev = outdev;
481 nexthop = rt6_nexthop(rt, &flow->tuplehash[!dir].tuple.src_v6); 525 nexthop = rt6_nexthop(rt, &flow->tuplehash[!dir].tuple.src_v6);
diff --git a/net/netfilter/nf_nat_amanda.c b/net/netfilter/nf_nat_amanda.c
index a352604d6186..3bc7e0854efe 100644
--- a/net/netfilter/nf_nat_amanda.c
+++ b/net/netfilter/nf_nat_amanda.c
@@ -48,7 +48,7 @@ static unsigned int help(struct sk_buff *skb,
48 int res; 48 int res;
49 49
50 exp->tuple.dst.u.tcp.port = htons(port); 50 exp->tuple.dst.u.tcp.port = htons(port);
51 res = nf_ct_expect_related(exp); 51 res = nf_ct_expect_related(exp, 0);
52 if (res == 0) 52 if (res == 0)
53 break; 53 break;
54 else if (res != -EBUSY) { 54 else if (res != -EBUSY) {
diff --git a/net/netfilter/nf_nat_core.c b/net/netfilter/nf_nat_core.c
index 9ab410455992..3f6023ed4966 100644
--- a/net/netfilter/nf_nat_core.c
+++ b/net/netfilter/nf_nat_core.c
@@ -519,7 +519,7 @@ another_round:
519 * and NF_INET_LOCAL_OUT, we change the destination to map into the 519 * and NF_INET_LOCAL_OUT, we change the destination to map into the
520 * range. It might not be possible to get a unique tuple, but we try. 520 * range. It might not be possible to get a unique tuple, but we try.
521 * At worst (or if we race), we will end up with a final duplicate in 521 * At worst (or if we race), we will end up with a final duplicate in
522 * __ip_conntrack_confirm and drop the packet. */ 522 * __nf_conntrack_confirm and drop the packet. */
523static void 523static void
524get_unique_tuple(struct nf_conntrack_tuple *tuple, 524get_unique_tuple(struct nf_conntrack_tuple *tuple,
525 const struct nf_conntrack_tuple *orig_tuple, 525 const struct nf_conntrack_tuple *orig_tuple,
diff --git a/net/netfilter/nf_nat_ftp.c b/net/netfilter/nf_nat_ftp.c
index d48484a9d52d..aace6768a64e 100644
--- a/net/netfilter/nf_nat_ftp.c
+++ b/net/netfilter/nf_nat_ftp.c
@@ -91,7 +91,7 @@ static unsigned int nf_nat_ftp(struct sk_buff *skb,
91 int ret; 91 int ret;
92 92
93 exp->tuple.dst.u.tcp.port = htons(port); 93 exp->tuple.dst.u.tcp.port = htons(port);
94 ret = nf_ct_expect_related(exp); 94 ret = nf_ct_expect_related(exp, 0);
95 if (ret == 0) 95 if (ret == 0)
96 break; 96 break;
97 else if (ret != -EBUSY) { 97 else if (ret != -EBUSY) {
diff --git a/net/netfilter/nf_nat_irc.c b/net/netfilter/nf_nat_irc.c
index dfb7ef8845bd..c691ab8d234c 100644
--- a/net/netfilter/nf_nat_irc.c
+++ b/net/netfilter/nf_nat_irc.c
@@ -53,7 +53,7 @@ static unsigned int help(struct sk_buff *skb,
53 int ret; 53 int ret;
54 54
55 exp->tuple.dst.u.tcp.port = htons(port); 55 exp->tuple.dst.u.tcp.port = htons(port);
56 ret = nf_ct_expect_related(exp); 56 ret = nf_ct_expect_related(exp, 0);
57 if (ret == 0) 57 if (ret == 0)
58 break; 58 break;
59 else if (ret != -EBUSY) { 59 else if (ret != -EBUSY) {
diff --git a/net/netfilter/nf_nat_sip.c b/net/netfilter/nf_nat_sip.c
index e338d91980d8..f0a735e86851 100644
--- a/net/netfilter/nf_nat_sip.c
+++ b/net/netfilter/nf_nat_sip.c
@@ -414,7 +414,7 @@ static unsigned int nf_nat_sip_expect(struct sk_buff *skb, unsigned int protoff,
414 int ret; 414 int ret;
415 415
416 exp->tuple.dst.u.udp.port = htons(port); 416 exp->tuple.dst.u.udp.port = htons(port);
417 ret = nf_ct_expect_related(exp); 417 ret = nf_ct_expect_related(exp, NF_CT_EXP_F_SKIP_MASTER);
418 if (ret == 0) 418 if (ret == 0)
419 break; 419 break;
420 else if (ret != -EBUSY) { 420 else if (ret != -EBUSY) {
@@ -607,7 +607,8 @@ static unsigned int nf_nat_sdp_media(struct sk_buff *skb, unsigned int protoff,
607 int ret; 607 int ret;
608 608
609 rtp_exp->tuple.dst.u.udp.port = htons(port); 609 rtp_exp->tuple.dst.u.udp.port = htons(port);
610 ret = nf_ct_expect_related(rtp_exp); 610 ret = nf_ct_expect_related(rtp_exp,
611 NF_CT_EXP_F_SKIP_MASTER);
611 if (ret == -EBUSY) 612 if (ret == -EBUSY)
612 continue; 613 continue;
613 else if (ret < 0) { 614 else if (ret < 0) {
@@ -615,7 +616,8 @@ static unsigned int nf_nat_sdp_media(struct sk_buff *skb, unsigned int protoff,
615 break; 616 break;
616 } 617 }
617 rtcp_exp->tuple.dst.u.udp.port = htons(port + 1); 618 rtcp_exp->tuple.dst.u.udp.port = htons(port + 1);
618 ret = nf_ct_expect_related(rtcp_exp); 619 ret = nf_ct_expect_related(rtcp_exp,
620 NF_CT_EXP_F_SKIP_MASTER);
619 if (ret == 0) 621 if (ret == 0)
620 break; 622 break;
621 else if (ret == -EBUSY) { 623 else if (ret == -EBUSY) {
diff --git a/net/netfilter/nf_nat_tftp.c b/net/netfilter/nf_nat_tftp.c
index 833a11f68031..1a591132d6eb 100644
--- a/net/netfilter/nf_nat_tftp.c
+++ b/net/netfilter/nf_nat_tftp.c
@@ -30,7 +30,7 @@ static unsigned int help(struct sk_buff *skb,
30 = ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.u.udp.port; 30 = ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.u.udp.port;
31 exp->dir = IP_CT_DIR_REPLY; 31 exp->dir = IP_CT_DIR_REPLY;
32 exp->expectfn = nf_nat_follow_master; 32 exp->expectfn = nf_nat_follow_master;
33 if (nf_ct_expect_related(exp) != 0) { 33 if (nf_ct_expect_related(exp, 0) != 0) {
34 nf_ct_helper_log(skb, exp->master, "cannot add expectation"); 34 nf_ct_helper_log(skb, exp->master, "cannot add expectation");
35 return NF_DROP; 35 return NF_DROP;
36 } 36 }
diff --git a/net/netfilter/nf_synproxy_core.c b/net/netfilter/nf_synproxy_core.c
index b101f187eda8..c769462a839e 100644
--- a/net/netfilter/nf_synproxy_core.c
+++ b/net/netfilter/nf_synproxy_core.c
@@ -470,7 +470,7 @@ synproxy_send_client_synack(struct net *net,
470 struct iphdr *iph, *niph; 470 struct iphdr *iph, *niph;
471 struct tcphdr *nth; 471 struct tcphdr *nth;
472 unsigned int tcp_hdr_size; 472 unsigned int tcp_hdr_size;
473 u16 mss = opts->mss; 473 u16 mss = opts->mss_encode;
474 474
475 iph = ip_hdr(skb); 475 iph = ip_hdr(skb);
476 476
@@ -687,7 +687,7 @@ ipv4_synproxy_hook(void *priv, struct sk_buff *skb,
687 state = &ct->proto.tcp; 687 state = &ct->proto.tcp;
688 switch (state->state) { 688 switch (state->state) {
689 case TCP_CONNTRACK_CLOSE: 689 case TCP_CONNTRACK_CLOSE:
690 if (th->rst && !test_bit(IPS_SEEN_REPLY_BIT, &ct->status)) { 690 if (th->rst && CTINFO2DIR(ctinfo) != IP_CT_DIR_ORIGINAL) {
691 nf_ct_seqadj_init(ct, ctinfo, synproxy->isn - 691 nf_ct_seqadj_init(ct, ctinfo, synproxy->isn -
692 ntohl(th->seq) + 1); 692 ntohl(th->seq) + 1);
693 break; 693 break;
@@ -884,7 +884,7 @@ synproxy_send_client_synack_ipv6(struct net *net,
884 struct ipv6hdr *iph, *niph; 884 struct ipv6hdr *iph, *niph;
885 struct tcphdr *nth; 885 struct tcphdr *nth;
886 unsigned int tcp_hdr_size; 886 unsigned int tcp_hdr_size;
887 u16 mss = opts->mss; 887 u16 mss = opts->mss_encode;
888 888
889 iph = ipv6_hdr(skb); 889 iph = ipv6_hdr(skb);
890 890
@@ -1111,7 +1111,7 @@ ipv6_synproxy_hook(void *priv, struct sk_buff *skb,
1111 state = &ct->proto.tcp; 1111 state = &ct->proto.tcp;
1112 switch (state->state) { 1112 switch (state->state) {
1113 case TCP_CONNTRACK_CLOSE: 1113 case TCP_CONNTRACK_CLOSE:
1114 if (th->rst && !test_bit(IPS_SEEN_REPLY_BIT, &ct->status)) { 1114 if (th->rst && CTINFO2DIR(ctinfo) != IP_CT_DIR_ORIGINAL) {
1115 nf_ct_seqadj_init(ct, ctinfo, synproxy->isn - 1115 nf_ct_seqadj_init(ct, ctinfo, synproxy->isn -
1116 ntohl(th->seq) + 1); 1116 ntohl(th->seq) + 1);
1117 break; 1117 break;
diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
index ed17a7c29b86..d47469f824a1 100644
--- a/net/netfilter/nf_tables_api.c
+++ b/net/netfilter/nf_tables_api.c
@@ -138,9 +138,14 @@ static void nft_set_trans_bind(const struct nft_ctx *ctx, struct nft_set *set)
138 return; 138 return;
139 139
140 list_for_each_entry_reverse(trans, &net->nft.commit_list, list) { 140 list_for_each_entry_reverse(trans, &net->nft.commit_list, list) {
141 if (trans->msg_type == NFT_MSG_NEWSET && 141 switch (trans->msg_type) {
142 nft_trans_set(trans) == set) { 142 case NFT_MSG_NEWSET:
143 set->bound = true; 143 if (nft_trans_set(trans) == set)
144 nft_trans_set_bound(trans) = true;
145 break;
146 case NFT_MSG_NEWSETELEM:
147 if (nft_trans_elem_set(trans) == set)
148 nft_trans_elem_set_bound(trans) = true;
144 break; 149 break;
145 } 150 }
146 } 151 }
@@ -1662,7 +1667,11 @@ static int nf_tables_addchain(struct nft_ctx *ctx, u8 family, u8 genmask,
1662 1667
1663 chain->flags |= NFT_BASE_CHAIN | flags; 1668 chain->flags |= NFT_BASE_CHAIN | flags;
1664 basechain->policy = NF_ACCEPT; 1669 basechain->policy = NF_ACCEPT;
1665 INIT_LIST_HEAD(&basechain->cb_list); 1670 if (chain->flags & NFT_CHAIN_HW_OFFLOAD &&
1671 nft_chain_offload_priority(basechain) < 0)
1672 return -EOPNOTSUPP;
1673
1674 flow_block_init(&basechain->flow_block);
1666 } else { 1675 } else {
1667 chain = kzalloc(sizeof(*chain), GFP_KERNEL); 1676 chain = kzalloc(sizeof(*chain), GFP_KERNEL);
1668 if (chain == NULL) 1677 if (chain == NULL)
@@ -1900,6 +1909,8 @@ static int nf_tables_newchain(struct net *net, struct sock *nlsk,
1900 1909
1901 if (nla[NFTA_CHAIN_FLAGS]) 1910 if (nla[NFTA_CHAIN_FLAGS])
1902 flags = ntohl(nla_get_be32(nla[NFTA_CHAIN_FLAGS])); 1911 flags = ntohl(nla_get_be32(nla[NFTA_CHAIN_FLAGS]));
1912 else if (chain)
1913 flags = chain->flags;
1903 1914
1904 nft_ctx_init(&ctx, net, skb, nlh, family, table, chain, nla); 1915 nft_ctx_init(&ctx, net, skb, nlh, family, table, chain, nla);
1905 1916
@@ -6904,7 +6915,7 @@ static int __nf_tables_abort(struct net *net)
6904 break; 6915 break;
6905 case NFT_MSG_NEWSET: 6916 case NFT_MSG_NEWSET:
6906 trans->ctx.table->use--; 6917 trans->ctx.table->use--;
6907 if (nft_trans_set(trans)->bound) { 6918 if (nft_trans_set_bound(trans)) {
6908 nft_trans_destroy(trans); 6919 nft_trans_destroy(trans);
6909 break; 6920 break;
6910 } 6921 }
@@ -6916,7 +6927,7 @@ static int __nf_tables_abort(struct net *net)
6916 nft_trans_destroy(trans); 6927 nft_trans_destroy(trans);
6917 break; 6928 break;
6918 case NFT_MSG_NEWSETELEM: 6929 case NFT_MSG_NEWSETELEM:
6919 if (nft_trans_elem_set(trans)->bound) { 6930 if (nft_trans_elem_set_bound(trans)) {
6920 nft_trans_destroy(trans); 6931 nft_trans_destroy(trans);
6921 break; 6932 break;
6922 } 6933 }
diff --git a/net/netfilter/nf_tables_offload.c b/net/netfilter/nf_tables_offload.c
index 2c3302845f67..c0d18c1d77ac 100644
--- a/net/netfilter/nf_tables_offload.c
+++ b/net/netfilter/nf_tables_offload.c
@@ -103,10 +103,11 @@ void nft_offload_update_dependency(struct nft_offload_ctx *ctx,
103} 103}
104 104
105static void nft_flow_offload_common_init(struct flow_cls_common_offload *common, 105static void nft_flow_offload_common_init(struct flow_cls_common_offload *common,
106 __be16 proto, 106 __be16 proto, int priority,
107 struct netlink_ext_ack *extack) 107 struct netlink_ext_ack *extack)
108{ 108{
109 common->protocol = proto; 109 common->protocol = proto;
110 common->prio = priority;
110 common->extack = extack; 111 common->extack = extack;
111} 112}
112 113
@@ -116,7 +117,7 @@ static int nft_setup_cb_call(struct nft_base_chain *basechain,
116 struct flow_block_cb *block_cb; 117 struct flow_block_cb *block_cb;
117 int err; 118 int err;
118 119
119 list_for_each_entry(block_cb, &basechain->cb_list, list) { 120 list_for_each_entry(block_cb, &basechain->flow_block.cb_list, list) {
120 err = block_cb->cb(type, type_data, block_cb->cb_priv); 121 err = block_cb->cb(type, type_data, block_cb->cb_priv);
121 if (err < 0) 122 if (err < 0)
122 return err; 123 return err;
@@ -124,6 +125,15 @@ static int nft_setup_cb_call(struct nft_base_chain *basechain,
124 return 0; 125 return 0;
125} 126}
126 127
128int nft_chain_offload_priority(struct nft_base_chain *basechain)
129{
130 if (basechain->ops.priority <= 0 ||
131 basechain->ops.priority > USHRT_MAX)
132 return -1;
133
134 return 0;
135}
136
127static int nft_flow_offload_rule(struct nft_trans *trans, 137static int nft_flow_offload_rule(struct nft_trans *trans,
128 enum flow_cls_command command) 138 enum flow_cls_command command)
129{ 139{
@@ -142,7 +152,8 @@ static int nft_flow_offload_rule(struct nft_trans *trans,
142 if (flow) 152 if (flow)
143 proto = flow->proto; 153 proto = flow->proto;
144 154
145 nft_flow_offload_common_init(&cls_flow.common, proto, &extack); 155 nft_flow_offload_common_init(&cls_flow.common, proto,
156 basechain->ops.priority, &extack);
146 cls_flow.command = command; 157 cls_flow.command = command;
147 cls_flow.cookie = (unsigned long) rule; 158 cls_flow.cookie = (unsigned long) rule;
148 if (flow) 159 if (flow)
@@ -154,7 +165,7 @@ static int nft_flow_offload_rule(struct nft_trans *trans,
154static int nft_flow_offload_bind(struct flow_block_offload *bo, 165static int nft_flow_offload_bind(struct flow_block_offload *bo,
155 struct nft_base_chain *basechain) 166 struct nft_base_chain *basechain)
156{ 167{
157 list_splice(&bo->cb_list, &basechain->cb_list); 168 list_splice(&bo->cb_list, &basechain->flow_block.cb_list);
158 return 0; 169 return 0;
159} 170}
160 171
@@ -198,6 +209,7 @@ static int nft_flow_offload_chain(struct nft_trans *trans,
198 return -EOPNOTSUPP; 209 return -EOPNOTSUPP;
199 210
200 bo.command = cmd; 211 bo.command = cmd;
212 bo.block = &basechain->flow_block;
201 bo.binder_type = FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS; 213 bo.binder_type = FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS;
202 bo.extack = &extack; 214 bo.extack = &extack;
203 INIT_LIST_HEAD(&bo.cb_list); 215 INIT_LIST_HEAD(&bo.cb_list);
diff --git a/net/netfilter/nfnetlink.c b/net/netfilter/nfnetlink.c
index 92077d459109..4abbb452cf6c 100644
--- a/net/netfilter/nfnetlink.c
+++ b/net/netfilter/nfnetlink.c
@@ -578,7 +578,7 @@ static int nfnetlink_bind(struct net *net, int group)
578 ss = nfnetlink_get_subsys(type << 8); 578 ss = nfnetlink_get_subsys(type << 8);
579 rcu_read_unlock(); 579 rcu_read_unlock();
580 if (!ss) 580 if (!ss)
581 request_module("nfnetlink-subsys-%d", type); 581 request_module_nowait("nfnetlink-subsys-%d", type);
582 return 0; 582 return 0;
583} 583}
584#endif 584#endif
diff --git a/net/netfilter/nft_chain_filter.c b/net/netfilter/nft_chain_filter.c
index 3fd540b2c6ba..b5d5d071d765 100644
--- a/net/netfilter/nft_chain_filter.c
+++ b/net/netfilter/nft_chain_filter.c
@@ -193,7 +193,7 @@ static inline void nft_chain_filter_inet_init(void) {}
193static inline void nft_chain_filter_inet_fini(void) {} 193static inline void nft_chain_filter_inet_fini(void) {}
194#endif /* CONFIG_NF_TABLES_IPV6 */ 194#endif /* CONFIG_NF_TABLES_IPV6 */
195 195
196#ifdef CONFIG_NF_TABLES_BRIDGE 196#if IS_ENABLED(CONFIG_NF_TABLES_BRIDGE)
197static unsigned int 197static unsigned int
198nft_do_chain_bridge(void *priv, 198nft_do_chain_bridge(void *priv,
199 struct sk_buff *skb, 199 struct sk_buff *skb,
diff --git a/net/netfilter/nft_chain_nat.c b/net/netfilter/nft_chain_nat.c
index 2f89bde3c61c..ff9ac8ae0031 100644
--- a/net/netfilter/nft_chain_nat.c
+++ b/net/netfilter/nft_chain_nat.c
@@ -142,3 +142,6 @@ MODULE_ALIAS_NFT_CHAIN(AF_INET, "nat");
142#ifdef CONFIG_NF_TABLES_IPV6 142#ifdef CONFIG_NF_TABLES_IPV6
143MODULE_ALIAS_NFT_CHAIN(AF_INET6, "nat"); 143MODULE_ALIAS_NFT_CHAIN(AF_INET6, "nat");
144#endif 144#endif
145#ifdef CONFIG_NF_TABLES_INET
146MODULE_ALIAS_NFT_CHAIN(1, "nat"); /* NFPROTO_INET */
147#endif
diff --git a/net/netfilter/nft_ct.c b/net/netfilter/nft_ct.c
index 827ab6196df9..46ca8bcca1bd 100644
--- a/net/netfilter/nft_ct.c
+++ b/net/netfilter/nft_ct.c
@@ -1252,7 +1252,7 @@ static void nft_ct_expect_obj_eval(struct nft_object *obj,
1252 priv->l4proto, NULL, &priv->dport); 1252 priv->l4proto, NULL, &priv->dport);
1253 exp->timeout.expires = jiffies + priv->timeout * HZ; 1253 exp->timeout.expires = jiffies + priv->timeout * HZ;
1254 1254
1255 if (nf_ct_expect_related(exp) != 0) 1255 if (nf_ct_expect_related(exp, 0) != 0)
1256 regs->verdict.code = NF_DROP; 1256 regs->verdict.code = NF_DROP;
1257} 1257}
1258 1258
diff --git a/net/netfilter/nft_flow_offload.c b/net/netfilter/nft_flow_offload.c
index aa5f571d4361..01705ad74a9a 100644
--- a/net/netfilter/nft_flow_offload.c
+++ b/net/netfilter/nft_flow_offload.c
@@ -72,11 +72,11 @@ static void nft_flow_offload_eval(const struct nft_expr *expr,
72{ 72{
73 struct nft_flow_offload *priv = nft_expr_priv(expr); 73 struct nft_flow_offload *priv = nft_expr_priv(expr);
74 struct nf_flowtable *flowtable = &priv->flowtable->data; 74 struct nf_flowtable *flowtable = &priv->flowtable->data;
75 struct tcphdr _tcph, *tcph = NULL;
75 enum ip_conntrack_info ctinfo; 76 enum ip_conntrack_info ctinfo;
76 struct nf_flow_route route; 77 struct nf_flow_route route;
77 struct flow_offload *flow; 78 struct flow_offload *flow;
78 enum ip_conntrack_dir dir; 79 enum ip_conntrack_dir dir;
79 bool is_tcp = false;
80 struct nf_conn *ct; 80 struct nf_conn *ct;
81 int ret; 81 int ret;
82 82
@@ -89,7 +89,10 @@ static void nft_flow_offload_eval(const struct nft_expr *expr,
89 89
90 switch (ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.protonum) { 90 switch (ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.protonum) {
91 case IPPROTO_TCP: 91 case IPPROTO_TCP:
92 is_tcp = true; 92 tcph = skb_header_pointer(pkt->skb, pkt->xt.thoff,
93 sizeof(_tcph), &_tcph);
94 if (unlikely(!tcph || tcph->fin || tcph->rst))
95 goto out;
93 break; 96 break;
94 case IPPROTO_UDP: 97 case IPPROTO_UDP:
95 break; 98 break;
@@ -115,7 +118,7 @@ static void nft_flow_offload_eval(const struct nft_expr *expr,
115 if (!flow) 118 if (!flow)
116 goto err_flow_alloc; 119 goto err_flow_alloc;
117 120
118 if (is_tcp) { 121 if (tcph) {
119 ct->proto.tcp.seen[0].flags |= IP_CT_TCP_FLAG_BE_LIBERAL; 122 ct->proto.tcp.seen[0].flags |= IP_CT_TCP_FLAG_BE_LIBERAL;
120 ct->proto.tcp.seen[1].flags |= IP_CT_TCP_FLAG_BE_LIBERAL; 123 ct->proto.tcp.seen[1].flags |= IP_CT_TCP_FLAG_BE_LIBERAL;
121 } 124 }
@@ -146,6 +149,11 @@ static int nft_flow_offload_validate(const struct nft_ctx *ctx,
146 return nft_chain_validate_hooks(ctx->chain, hook_mask); 149 return nft_chain_validate_hooks(ctx->chain, hook_mask);
147} 150}
148 151
152static const struct nla_policy nft_flow_offload_policy[NFTA_FLOW_MAX + 1] = {
153 [NFTA_FLOW_TABLE_NAME] = { .type = NLA_STRING,
154 .len = NFT_NAME_MAXLEN - 1 },
155};
156
149static int nft_flow_offload_init(const struct nft_ctx *ctx, 157static int nft_flow_offload_init(const struct nft_ctx *ctx,
150 const struct nft_expr *expr, 158 const struct nft_expr *expr,
151 const struct nlattr * const tb[]) 159 const struct nlattr * const tb[])
@@ -204,6 +212,7 @@ static const struct nft_expr_ops nft_flow_offload_ops = {
204static struct nft_expr_type nft_flow_offload_type __read_mostly = { 212static struct nft_expr_type nft_flow_offload_type __read_mostly = {
205 .name = "flow_offload", 213 .name = "flow_offload",
206 .ops = &nft_flow_offload_ops, 214 .ops = &nft_flow_offload_ops,
215 .policy = nft_flow_offload_policy,
207 .maxattr = NFTA_FLOW_MAX, 216 .maxattr = NFTA_FLOW_MAX,
208 .owner = THIS_MODULE, 217 .owner = THIS_MODULE,
209}; 218};
diff --git a/net/netfilter/nft_hash.c b/net/netfilter/nft_hash.c
index fe93e731dc7f..b836d550b919 100644
--- a/net/netfilter/nft_hash.c
+++ b/net/netfilter/nft_hash.c
@@ -129,7 +129,7 @@ static int nft_symhash_init(const struct nft_ctx *ctx,
129 priv->dreg = nft_parse_register(tb[NFTA_HASH_DREG]); 129 priv->dreg = nft_parse_register(tb[NFTA_HASH_DREG]);
130 130
131 priv->modulus = ntohl(nla_get_be32(tb[NFTA_HASH_MODULUS])); 131 priv->modulus = ntohl(nla_get_be32(tb[NFTA_HASH_MODULUS]));
132 if (priv->modulus <= 1) 132 if (priv->modulus < 1)
133 return -ERANGE; 133 return -ERANGE;
134 134
135 if (priv->offset + priv->modulus - 1 < priv->offset) 135 if (priv->offset + priv->modulus - 1 < priv->offset)
diff --git a/net/netfilter/nft_meta.c b/net/netfilter/nft_meta.c
index 76866f77e343..f69afb9ff3cb 100644
--- a/net/netfilter/nft_meta.c
+++ b/net/netfilter/nft_meta.c
@@ -60,24 +60,16 @@ void nft_meta_get_eval(const struct nft_expr *expr,
60 *dest = skb->mark; 60 *dest = skb->mark;
61 break; 61 break;
62 case NFT_META_IIF: 62 case NFT_META_IIF:
63 if (in == NULL) 63 *dest = in ? in->ifindex : 0;
64 goto err;
65 *dest = in->ifindex;
66 break; 64 break;
67 case NFT_META_OIF: 65 case NFT_META_OIF:
68 if (out == NULL) 66 *dest = out ? out->ifindex : 0;
69 goto err;
70 *dest = out->ifindex;
71 break; 67 break;
72 case NFT_META_IIFNAME: 68 case NFT_META_IIFNAME:
73 if (in == NULL) 69 strncpy((char *)dest, in ? in->name : "", IFNAMSIZ);
74 goto err;
75 strncpy((char *)dest, in->name, IFNAMSIZ);
76 break; 70 break;
77 case NFT_META_OIFNAME: 71 case NFT_META_OIFNAME:
78 if (out == NULL) 72 strncpy((char *)dest, out ? out->name : "", IFNAMSIZ);
79 goto err;
80 strncpy((char *)dest, out->name, IFNAMSIZ);
81 break; 73 break;
82 case NFT_META_IIFTYPE: 74 case NFT_META_IIFTYPE:
83 if (in == NULL) 75 if (in == NULL)
@@ -546,7 +538,7 @@ nft_meta_select_ops(const struct nft_ctx *ctx,
546 if (tb[NFTA_META_DREG] && tb[NFTA_META_SREG]) 538 if (tb[NFTA_META_DREG] && tb[NFTA_META_SREG])
547 return ERR_PTR(-EINVAL); 539 return ERR_PTR(-EINVAL);
548 540
549#ifdef CONFIG_NF_TABLES_BRIDGE 541#if IS_ENABLED(CONFIG_NF_TABLES_BRIDGE) && IS_MODULE(CONFIG_NFT_BRIDGE_META)
550 if (ctx->family == NFPROTO_BRIDGE) 542 if (ctx->family == NFPROTO_BRIDGE)
551 return ERR_PTR(-EAGAIN); 543 return ERR_PTR(-EAGAIN);
552#endif 544#endif
diff --git a/net/netfilter/nft_redir.c b/net/netfilter/nft_redir.c
index 8487eeff5c0e..43eeb1f609f1 100644
--- a/net/netfilter/nft_redir.c
+++ b/net/netfilter/nft_redir.c
@@ -291,4 +291,4 @@ module_exit(nft_redir_module_exit);
291 291
292MODULE_LICENSE("GPL"); 292MODULE_LICENSE("GPL");
293MODULE_AUTHOR("Arturo Borrero Gonzalez <arturo@debian.org>"); 293MODULE_AUTHOR("Arturo Borrero Gonzalez <arturo@debian.org>");
294MODULE_ALIAS_NFT_EXPR("nat"); 294MODULE_ALIAS_NFT_EXPR("redir");
diff --git a/net/netfilter/nft_synproxy.c b/net/netfilter/nft_synproxy.c
index 80060ade8a5b..928e661d1517 100644
--- a/net/netfilter/nft_synproxy.c
+++ b/net/netfilter/nft_synproxy.c
@@ -31,6 +31,8 @@ static void nft_synproxy_tcp_options(struct synproxy_options *opts,
31 opts->options |= NF_SYNPROXY_OPT_ECN; 31 opts->options |= NF_SYNPROXY_OPT_ECN;
32 32
33 opts->options &= priv->info.options; 33 opts->options &= priv->info.options;
34 opts->mss_encode = opts->mss;
35 opts->mss = info->mss;
34 if (opts->options & NF_SYNPROXY_OPT_TIMESTAMP) 36 if (opts->options & NF_SYNPROXY_OPT_TIMESTAMP)
35 synproxy_init_timestamp_cookie(info, opts); 37 synproxy_init_timestamp_cookie(info, opts);
36 else 38 else
diff --git a/net/netfilter/xt_nfacct.c b/net/netfilter/xt_nfacct.c
index d0ab1adf5bff..5aab6df74e0f 100644
--- a/net/netfilter/xt_nfacct.c
+++ b/net/netfilter/xt_nfacct.c
@@ -54,25 +54,39 @@ nfacct_mt_destroy(const struct xt_mtdtor_param *par)
54 nfnl_acct_put(info->nfacct); 54 nfnl_acct_put(info->nfacct);
55} 55}
56 56
57static struct xt_match nfacct_mt_reg __read_mostly = { 57static struct xt_match nfacct_mt_reg[] __read_mostly = {
58 .name = "nfacct", 58 {
59 .family = NFPROTO_UNSPEC, 59 .name = "nfacct",
60 .checkentry = nfacct_mt_checkentry, 60 .revision = 0,
61 .match = nfacct_mt, 61 .family = NFPROTO_UNSPEC,
62 .destroy = nfacct_mt_destroy, 62 .checkentry = nfacct_mt_checkentry,
63 .matchsize = sizeof(struct xt_nfacct_match_info), 63 .match = nfacct_mt,
64 .usersize = offsetof(struct xt_nfacct_match_info, nfacct), 64 .destroy = nfacct_mt_destroy,
65 .me = THIS_MODULE, 65 .matchsize = sizeof(struct xt_nfacct_match_info),
66 .usersize = offsetof(struct xt_nfacct_match_info, nfacct),
67 .me = THIS_MODULE,
68 },
69 {
70 .name = "nfacct",
71 .revision = 1,
72 .family = NFPROTO_UNSPEC,
73 .checkentry = nfacct_mt_checkentry,
74 .match = nfacct_mt,
75 .destroy = nfacct_mt_destroy,
76 .matchsize = sizeof(struct xt_nfacct_match_info_v1),
77 .usersize = offsetof(struct xt_nfacct_match_info_v1, nfacct),
78 .me = THIS_MODULE,
79 },
66}; 80};
67 81
68static int __init nfacct_mt_init(void) 82static int __init nfacct_mt_init(void)
69{ 83{
70 return xt_register_match(&nfacct_mt_reg); 84 return xt_register_matches(nfacct_mt_reg, ARRAY_SIZE(nfacct_mt_reg));
71} 85}
72 86
73static void __exit nfacct_mt_exit(void) 87static void __exit nfacct_mt_exit(void)
74{ 88{
75 xt_unregister_match(&nfacct_mt_reg); 89 xt_unregister_matches(nfacct_mt_reg, ARRAY_SIZE(nfacct_mt_reg));
76} 90}
77 91
78module_init(nfacct_mt_init); 92module_init(nfacct_mt_init);
diff --git a/net/netfilter/xt_physdev.c b/net/netfilter/xt_physdev.c
index ead7c6022208..b92b22ce8abd 100644
--- a/net/netfilter/xt_physdev.c
+++ b/net/netfilter/xt_physdev.c
@@ -101,11 +101,9 @@ static int physdev_mt_check(const struct xt_mtchk_param *par)
101 if (info->bitmask & (XT_PHYSDEV_OP_OUT | XT_PHYSDEV_OP_ISOUT) && 101 if (info->bitmask & (XT_PHYSDEV_OP_OUT | XT_PHYSDEV_OP_ISOUT) &&
102 (!(info->bitmask & XT_PHYSDEV_OP_BRIDGED) || 102 (!(info->bitmask & XT_PHYSDEV_OP_BRIDGED) ||
103 info->invert & XT_PHYSDEV_OP_BRIDGED) && 103 info->invert & XT_PHYSDEV_OP_BRIDGED) &&
104 par->hook_mask & ((1 << NF_INET_LOCAL_OUT) | 104 par->hook_mask & (1 << NF_INET_LOCAL_OUT)) {
105 (1 << NF_INET_FORWARD) | (1 << NF_INET_POST_ROUTING))) {
106 pr_info_ratelimited("--physdev-out and --physdev-is-out only supported in the FORWARD and POSTROUTING chains with bridged traffic\n"); 105 pr_info_ratelimited("--physdev-out and --physdev-is-out only supported in the FORWARD and POSTROUTING chains with bridged traffic\n");
107 if (par->hook_mask & (1 << NF_INET_LOCAL_OUT)) 106 return -EINVAL;
108 return -EINVAL;
109 } 107 }
110 108
111 if (!brnf_probed) { 109 if (!brnf_probed) {
diff --git a/net/netrom/af_netrom.c b/net/netrom/af_netrom.c
index 96740d389377..c4f54ad2b98a 100644
--- a/net/netrom/af_netrom.c
+++ b/net/netrom/af_netrom.c
@@ -967,6 +967,7 @@ int nr_rx_frame(struct sk_buff *skb, struct net_device *dev)
967 967
968 window = skb->data[20]; 968 window = skb->data[20];
969 969
970 sock_hold(make);
970 skb->sk = make; 971 skb->sk = make;
971 skb->destructor = sock_efree; 972 skb->destructor = sock_efree;
972 make->sk_state = TCP_ESTABLISHED; 973 make->sk_state = TCP_ESTABLISHED;
diff --git a/net/openvswitch/conntrack.c b/net/openvswitch/conntrack.c
index 848c6eb55064..05249eb45082 100644
--- a/net/openvswitch/conntrack.c
+++ b/net/openvswitch/conntrack.c
@@ -67,6 +67,7 @@ struct ovs_conntrack_info {
67 struct md_mark mark; 67 struct md_mark mark;
68 struct md_labels labels; 68 struct md_labels labels;
69 char timeout[CTNL_TIMEOUT_NAME_MAX]; 69 char timeout[CTNL_TIMEOUT_NAME_MAX];
70 struct nf_ct_timeout *nf_ct_timeout;
70#if IS_ENABLED(CONFIG_NF_NAT) 71#if IS_ENABLED(CONFIG_NF_NAT)
71 struct nf_nat_range2 range; /* Only present for SRC NAT and DST NAT. */ 72 struct nf_nat_range2 range; /* Only present for SRC NAT and DST NAT. */
72#endif 73#endif
@@ -524,6 +525,11 @@ static int handle_fragments(struct net *net, struct sw_flow_key *key,
524 return -EPFNOSUPPORT; 525 return -EPFNOSUPPORT;
525 } 526 }
526 527
528 /* The key extracted from the fragment that completed this datagram
529 * likely didn't have an L4 header, so regenerate it.
530 */
531 ovs_flow_key_update_l3l4(skb, key);
532
527 key->ip.frag = OVS_FRAG_TYPE_NONE; 533 key->ip.frag = OVS_FRAG_TYPE_NONE;
528 skb_clear_hash(skb); 534 skb_clear_hash(skb);
529 skb->ignore_df = 1; 535 skb->ignore_df = 1;
@@ -697,6 +703,14 @@ static bool skb_nfct_cached(struct net *net,
697 if (help && rcu_access_pointer(help->helper) != info->helper) 703 if (help && rcu_access_pointer(help->helper) != info->helper)
698 return false; 704 return false;
699 } 705 }
706 if (info->nf_ct_timeout) {
707 struct nf_conn_timeout *timeout_ext;
708
709 timeout_ext = nf_ct_timeout_find(ct);
710 if (!timeout_ext || info->nf_ct_timeout !=
711 rcu_dereference(timeout_ext->timeout))
712 return false;
713 }
700 /* Force conntrack entry direction to the current packet? */ 714 /* Force conntrack entry direction to the current packet? */
701 if (info->force && CTINFO2DIR(ctinfo) != IP_CT_DIR_ORIGINAL) { 715 if (info->force && CTINFO2DIR(ctinfo) != IP_CT_DIR_ORIGINAL) {
702 /* Delete the conntrack entry if confirmed, else just release 716 /* Delete the conntrack entry if confirmed, else just release
@@ -1565,7 +1579,7 @@ static int parse_ct(const struct nlattr *attr, struct ovs_conntrack_info *info,
1565 case OVS_CT_ATTR_TIMEOUT: 1579 case OVS_CT_ATTR_TIMEOUT:
1566 memcpy(info->timeout, nla_data(a), nla_len(a)); 1580 memcpy(info->timeout, nla_data(a), nla_len(a));
1567 if (!memchr(info->timeout, '\0', nla_len(a))) { 1581 if (!memchr(info->timeout, '\0', nla_len(a))) {
1568 OVS_NLERR(log, "Invalid conntrack helper"); 1582 OVS_NLERR(log, "Invalid conntrack timeout");
1569 return -EINVAL; 1583 return -EINVAL;
1570 } 1584 }
1571 break; 1585 break;
@@ -1657,6 +1671,10 @@ int ovs_ct_copy_action(struct net *net, const struct nlattr *attr,
1657 ct_info.timeout)) 1671 ct_info.timeout))
1658 pr_info_ratelimited("Failed to associated timeout " 1672 pr_info_ratelimited("Failed to associated timeout "
1659 "policy `%s'\n", ct_info.timeout); 1673 "policy `%s'\n", ct_info.timeout);
1674 else
1675 ct_info.nf_ct_timeout = rcu_dereference(
1676 nf_ct_timeout_find(ct_info.ct)->timeout);
1677
1660 } 1678 }
1661 1679
1662 if (helper) { 1680 if (helper) {
diff --git a/net/openvswitch/datapath.c b/net/openvswitch/datapath.c
index 892287d06c17..d01410e52097 100644
--- a/net/openvswitch/datapath.c
+++ b/net/openvswitch/datapath.c
@@ -1047,7 +1047,7 @@ error:
1047} 1047}
1048 1048
1049/* Factor out action copy to avoid "Wframe-larger-than=1024" warning. */ 1049/* Factor out action copy to avoid "Wframe-larger-than=1024" warning. */
1050static struct sw_flow_actions *get_flow_actions(struct net *net, 1050static noinline_for_stack struct sw_flow_actions *get_flow_actions(struct net *net,
1051 const struct nlattr *a, 1051 const struct nlattr *a,
1052 const struct sw_flow_key *key, 1052 const struct sw_flow_key *key,
1053 const struct sw_flow_mask *mask, 1053 const struct sw_flow_mask *mask,
@@ -1081,12 +1081,13 @@ static struct sw_flow_actions *get_flow_actions(struct net *net,
1081 * we should not to return match object with dangling reference 1081 * we should not to return match object with dangling reference
1082 * to mask. 1082 * to mask.
1083 * */ 1083 * */
1084static int ovs_nla_init_match_and_action(struct net *net, 1084static noinline_for_stack int
1085 struct sw_flow_match *match, 1085ovs_nla_init_match_and_action(struct net *net,
1086 struct sw_flow_key *key, 1086 struct sw_flow_match *match,
1087 struct nlattr **a, 1087 struct sw_flow_key *key,
1088 struct sw_flow_actions **acts, 1088 struct nlattr **a,
1089 bool log) 1089 struct sw_flow_actions **acts,
1090 bool log)
1090{ 1091{
1091 struct sw_flow_mask mask; 1092 struct sw_flow_mask mask;
1092 int error = 0; 1093 int error = 0;
diff --git a/net/openvswitch/flow.c b/net/openvswitch/flow.c
index dca3b1e2acf0..9d81d2c7bf82 100644
--- a/net/openvswitch/flow.c
+++ b/net/openvswitch/flow.c
@@ -59,7 +59,7 @@ u64 ovs_flow_used_time(unsigned long flow_jiffies)
59void ovs_flow_stats_update(struct sw_flow *flow, __be16 tcp_flags, 59void ovs_flow_stats_update(struct sw_flow *flow, __be16 tcp_flags,
60 const struct sk_buff *skb) 60 const struct sk_buff *skb)
61{ 61{
62 struct flow_stats *stats; 62 struct sw_flow_stats *stats;
63 unsigned int cpu = smp_processor_id(); 63 unsigned int cpu = smp_processor_id();
64 int len = skb->len + (skb_vlan_tag_present(skb) ? VLAN_HLEN : 0); 64 int len = skb->len + (skb_vlan_tag_present(skb) ? VLAN_HLEN : 0);
65 65
@@ -87,7 +87,7 @@ void ovs_flow_stats_update(struct sw_flow *flow, __be16 tcp_flags,
87 if (likely(flow->stats_last_writer != -1) && 87 if (likely(flow->stats_last_writer != -1) &&
88 likely(!rcu_access_pointer(flow->stats[cpu]))) { 88 likely(!rcu_access_pointer(flow->stats[cpu]))) {
89 /* Try to allocate CPU-specific stats. */ 89 /* Try to allocate CPU-specific stats. */
90 struct flow_stats *new_stats; 90 struct sw_flow_stats *new_stats;
91 91
92 new_stats = 92 new_stats =
93 kmem_cache_alloc_node(flow_stats_cache, 93 kmem_cache_alloc_node(flow_stats_cache,
@@ -134,7 +134,7 @@ void ovs_flow_stats_get(const struct sw_flow *flow,
134 134
135 /* We open code this to make sure cpu 0 is always considered */ 135 /* We open code this to make sure cpu 0 is always considered */
136 for (cpu = 0; cpu < nr_cpu_ids; cpu = cpumask_next(cpu, &flow->cpu_used_mask)) { 136 for (cpu = 0; cpu < nr_cpu_ids; cpu = cpumask_next(cpu, &flow->cpu_used_mask)) {
137 struct flow_stats *stats = rcu_dereference_ovsl(flow->stats[cpu]); 137 struct sw_flow_stats *stats = rcu_dereference_ovsl(flow->stats[cpu]);
138 138
139 if (stats) { 139 if (stats) {
140 /* Local CPU may write on non-local stats, so we must 140 /* Local CPU may write on non-local stats, so we must
@@ -158,7 +158,7 @@ void ovs_flow_stats_clear(struct sw_flow *flow)
158 158
159 /* We open code this to make sure cpu 0 is always considered */ 159 /* We open code this to make sure cpu 0 is always considered */
160 for (cpu = 0; cpu < nr_cpu_ids; cpu = cpumask_next(cpu, &flow->cpu_used_mask)) { 160 for (cpu = 0; cpu < nr_cpu_ids; cpu = cpumask_next(cpu, &flow->cpu_used_mask)) {
161 struct flow_stats *stats = ovsl_dereference(flow->stats[cpu]); 161 struct sw_flow_stats *stats = ovsl_dereference(flow->stats[cpu]);
162 162
163 if (stats) { 163 if (stats) {
164 spin_lock_bh(&stats->lock); 164 spin_lock_bh(&stats->lock);
@@ -523,78 +523,15 @@ static int parse_nsh(struct sk_buff *skb, struct sw_flow_key *key)
523} 523}
524 524
525/** 525/**
526 * key_extract - extracts a flow key from an Ethernet frame. 526 * key_extract_l3l4 - extracts L3/L4 header information.
527 * @skb: sk_buff that contains the frame, with skb->data pointing to the 527 * @skb: sk_buff that contains the frame, with skb->data pointing to the
528 * Ethernet header 528 * L3 header
529 * @key: output flow key 529 * @key: output flow key
530 * 530 *
531 * The caller must ensure that skb->len >= ETH_HLEN.
532 *
533 * Returns 0 if successful, otherwise a negative errno value.
534 *
535 * Initializes @skb header fields as follows:
536 *
537 * - skb->mac_header: the L2 header.
538 *
539 * - skb->network_header: just past the L2 header, or just past the
540 * VLAN header, to the first byte of the L2 payload.
541 *
542 * - skb->transport_header: If key->eth.type is ETH_P_IP or ETH_P_IPV6
543 * on output, then just past the IP header, if one is present and
544 * of a correct length, otherwise the same as skb->network_header.
545 * For other key->eth.type values it is left untouched.
546 *
547 * - skb->protocol: the type of the data starting at skb->network_header.
548 * Equals to key->eth.type.
549 */ 531 */
550static int key_extract(struct sk_buff *skb, struct sw_flow_key *key) 532static int key_extract_l3l4(struct sk_buff *skb, struct sw_flow_key *key)
551{ 533{
552 int error; 534 int error;
553 struct ethhdr *eth;
554
555 /* Flags are always used as part of stats */
556 key->tp.flags = 0;
557
558 skb_reset_mac_header(skb);
559
560 /* Link layer. */
561 clear_vlan(key);
562 if (ovs_key_mac_proto(key) == MAC_PROTO_NONE) {
563 if (unlikely(eth_type_vlan(skb->protocol)))
564 return -EINVAL;
565
566 skb_reset_network_header(skb);
567 key->eth.type = skb->protocol;
568 } else {
569 eth = eth_hdr(skb);
570 ether_addr_copy(key->eth.src, eth->h_source);
571 ether_addr_copy(key->eth.dst, eth->h_dest);
572
573 __skb_pull(skb, 2 * ETH_ALEN);
574 /* We are going to push all headers that we pull, so no need to
575 * update skb->csum here.
576 */
577
578 if (unlikely(parse_vlan(skb, key)))
579 return -ENOMEM;
580
581 key->eth.type = parse_ethertype(skb);
582 if (unlikely(key->eth.type == htons(0)))
583 return -ENOMEM;
584
585 /* Multiple tagged packets need to retain TPID to satisfy
586 * skb_vlan_pop(), which will later shift the ethertype into
587 * skb->protocol.
588 */
589 if (key->eth.cvlan.tci & htons(VLAN_CFI_MASK))
590 skb->protocol = key->eth.cvlan.tpid;
591 else
592 skb->protocol = key->eth.type;
593
594 skb_reset_network_header(skb);
595 __skb_push(skb, skb->data - skb_mac_header(skb));
596 }
597 skb_reset_mac_len(skb);
598 535
599 /* Network layer. */ 536 /* Network layer. */
600 if (key->eth.type == htons(ETH_P_IP)) { 537 if (key->eth.type == htons(ETH_P_IP)) {
@@ -623,6 +560,7 @@ static int key_extract(struct sk_buff *skb, struct sw_flow_key *key)
623 offset = nh->frag_off & htons(IP_OFFSET); 560 offset = nh->frag_off & htons(IP_OFFSET);
624 if (offset) { 561 if (offset) {
625 key->ip.frag = OVS_FRAG_TYPE_LATER; 562 key->ip.frag = OVS_FRAG_TYPE_LATER;
563 memset(&key->tp, 0, sizeof(key->tp));
626 return 0; 564 return 0;
627 } 565 }
628 if (nh->frag_off & htons(IP_MF) || 566 if (nh->frag_off & htons(IP_MF) ||
@@ -740,8 +678,10 @@ static int key_extract(struct sk_buff *skb, struct sw_flow_key *key)
740 return error; 678 return error;
741 } 679 }
742 680
743 if (key->ip.frag == OVS_FRAG_TYPE_LATER) 681 if (key->ip.frag == OVS_FRAG_TYPE_LATER) {
682 memset(&key->tp, 0, sizeof(key->tp));
744 return 0; 683 return 0;
684 }
745 if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP) 685 if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP)
746 key->ip.frag = OVS_FRAG_TYPE_FIRST; 686 key->ip.frag = OVS_FRAG_TYPE_FIRST;
747 687
@@ -788,6 +728,92 @@ static int key_extract(struct sk_buff *skb, struct sw_flow_key *key)
788 return 0; 728 return 0;
789} 729}
790 730
731/**
732 * key_extract - extracts a flow key from an Ethernet frame.
733 * @skb: sk_buff that contains the frame, with skb->data pointing to the
734 * Ethernet header
735 * @key: output flow key
736 *
737 * The caller must ensure that skb->len >= ETH_HLEN.
738 *
739 * Returns 0 if successful, otherwise a negative errno value.
740 *
741 * Initializes @skb header fields as follows:
742 *
743 * - skb->mac_header: the L2 header.
744 *
745 * - skb->network_header: just past the L2 header, or just past the
746 * VLAN header, to the first byte of the L2 payload.
747 *
748 * - skb->transport_header: If key->eth.type is ETH_P_IP or ETH_P_IPV6
749 * on output, then just past the IP header, if one is present and
750 * of a correct length, otherwise the same as skb->network_header.
751 * For other key->eth.type values it is left untouched.
752 *
753 * - skb->protocol: the type of the data starting at skb->network_header.
754 * Equals to key->eth.type.
755 */
756static int key_extract(struct sk_buff *skb, struct sw_flow_key *key)
757{
758 struct ethhdr *eth;
759
760 /* Flags are always used as part of stats */
761 key->tp.flags = 0;
762
763 skb_reset_mac_header(skb);
764
765 /* Link layer. */
766 clear_vlan(key);
767 if (ovs_key_mac_proto(key) == MAC_PROTO_NONE) {
768 if (unlikely(eth_type_vlan(skb->protocol)))
769 return -EINVAL;
770
771 skb_reset_network_header(skb);
772 key->eth.type = skb->protocol;
773 } else {
774 eth = eth_hdr(skb);
775 ether_addr_copy(key->eth.src, eth->h_source);
776 ether_addr_copy(key->eth.dst, eth->h_dest);
777
778 __skb_pull(skb, 2 * ETH_ALEN);
779 /* We are going to push all headers that we pull, so no need to
780 * update skb->csum here.
781 */
782
783 if (unlikely(parse_vlan(skb, key)))
784 return -ENOMEM;
785
786 key->eth.type = parse_ethertype(skb);
787 if (unlikely(key->eth.type == htons(0)))
788 return -ENOMEM;
789
790 /* Multiple tagged packets need to retain TPID to satisfy
791 * skb_vlan_pop(), which will later shift the ethertype into
792 * skb->protocol.
793 */
794 if (key->eth.cvlan.tci & htons(VLAN_CFI_MASK))
795 skb->protocol = key->eth.cvlan.tpid;
796 else
797 skb->protocol = key->eth.type;
798
799 skb_reset_network_header(skb);
800 __skb_push(skb, skb->data - skb_mac_header(skb));
801 }
802
803 skb_reset_mac_len(skb);
804
805 /* Fill out L3/L4 key info, if any */
806 return key_extract_l3l4(skb, key);
807}
808
809/* In the case of conntrack fragment handling it expects L3 headers,
810 * add a helper.
811 */
812int ovs_flow_key_update_l3l4(struct sk_buff *skb, struct sw_flow_key *key)
813{
814 return key_extract_l3l4(skb, key);
815}
816
791int ovs_flow_key_update(struct sk_buff *skb, struct sw_flow_key *key) 817int ovs_flow_key_update(struct sk_buff *skb, struct sw_flow_key *key)
792{ 818{
793 int res; 819 int res;
diff --git a/net/openvswitch/flow.h b/net/openvswitch/flow.h
index 3e2cc2202d66..b830d5ff7af4 100644
--- a/net/openvswitch/flow.h
+++ b/net/openvswitch/flow.h
@@ -194,7 +194,7 @@ struct sw_flow_actions {
194 struct nlattr actions[]; 194 struct nlattr actions[];
195}; 195};
196 196
197struct flow_stats { 197struct sw_flow_stats {
198 u64 packet_count; /* Number of packets matched. */ 198 u64 packet_count; /* Number of packets matched. */
199 u64 byte_count; /* Number of bytes matched. */ 199 u64 byte_count; /* Number of bytes matched. */
200 unsigned long used; /* Last used time (in jiffies). */ 200 unsigned long used; /* Last used time (in jiffies). */
@@ -216,7 +216,7 @@ struct sw_flow {
216 struct cpumask cpu_used_mask; 216 struct cpumask cpu_used_mask;
217 struct sw_flow_mask *mask; 217 struct sw_flow_mask *mask;
218 struct sw_flow_actions __rcu *sf_acts; 218 struct sw_flow_actions __rcu *sf_acts;
219 struct flow_stats __rcu *stats[]; /* One for each CPU. First one 219 struct sw_flow_stats __rcu *stats[]; /* One for each CPU. First one
220 * is allocated at flow creation time, 220 * is allocated at flow creation time,
221 * the rest are allocated on demand 221 * the rest are allocated on demand
222 * while holding the 'stats[0].lock'. 222 * while holding the 'stats[0].lock'.
@@ -270,6 +270,7 @@ void ovs_flow_stats_clear(struct sw_flow *);
270u64 ovs_flow_used_time(unsigned long flow_jiffies); 270u64 ovs_flow_used_time(unsigned long flow_jiffies);
271 271
272int ovs_flow_key_update(struct sk_buff *skb, struct sw_flow_key *key); 272int ovs_flow_key_update(struct sk_buff *skb, struct sw_flow_key *key);
273int ovs_flow_key_update_l3l4(struct sk_buff *skb, struct sw_flow_key *key);
273int ovs_flow_key_extract(const struct ip_tunnel_info *tun_info, 274int ovs_flow_key_extract(const struct ip_tunnel_info *tun_info,
274 struct sk_buff *skb, 275 struct sk_buff *skb,
275 struct sw_flow_key *key); 276 struct sw_flow_key *key);
diff --git a/net/openvswitch/flow_table.c b/net/openvswitch/flow_table.c
index 988fd8a94e43..cf3582c5ed70 100644
--- a/net/openvswitch/flow_table.c
+++ b/net/openvswitch/flow_table.c
@@ -66,7 +66,7 @@ void ovs_flow_mask_key(struct sw_flow_key *dst, const struct sw_flow_key *src,
66struct sw_flow *ovs_flow_alloc(void) 66struct sw_flow *ovs_flow_alloc(void)
67{ 67{
68 struct sw_flow *flow; 68 struct sw_flow *flow;
69 struct flow_stats *stats; 69 struct sw_flow_stats *stats;
70 70
71 flow = kmem_cache_zalloc(flow_cache, GFP_KERNEL); 71 flow = kmem_cache_zalloc(flow_cache, GFP_KERNEL);
72 if (!flow) 72 if (!flow)
@@ -110,7 +110,7 @@ static void flow_free(struct sw_flow *flow)
110 for (cpu = 0; cpu < nr_cpu_ids; cpu = cpumask_next(cpu, &flow->cpu_used_mask)) 110 for (cpu = 0; cpu < nr_cpu_ids; cpu = cpumask_next(cpu, &flow->cpu_used_mask))
111 if (flow->stats[cpu]) 111 if (flow->stats[cpu])
112 kmem_cache_free(flow_stats_cache, 112 kmem_cache_free(flow_stats_cache,
113 (struct flow_stats __force *)flow->stats[cpu]); 113 (struct sw_flow_stats __force *)flow->stats[cpu]);
114 kmem_cache_free(flow_cache, flow); 114 kmem_cache_free(flow_cache, flow);
115} 115}
116 116
@@ -712,13 +712,13 @@ int ovs_flow_init(void)
712 712
713 flow_cache = kmem_cache_create("sw_flow", sizeof(struct sw_flow) 713 flow_cache = kmem_cache_create("sw_flow", sizeof(struct sw_flow)
714 + (nr_cpu_ids 714 + (nr_cpu_ids
715 * sizeof(struct flow_stats *)), 715 * sizeof(struct sw_flow_stats *)),
716 0, 0, NULL); 716 0, 0, NULL);
717 if (flow_cache == NULL) 717 if (flow_cache == NULL)
718 return -ENOMEM; 718 return -ENOMEM;
719 719
720 flow_stats_cache 720 flow_stats_cache
721 = kmem_cache_create("sw_flow_stats", sizeof(struct flow_stats), 721 = kmem_cache_create("sw_flow_stats", sizeof(struct sw_flow_stats),
722 0, SLAB_HWCACHE_ALIGN, NULL); 722 0, SLAB_HWCACHE_ALIGN, NULL);
723 if (flow_stats_cache == NULL) { 723 if (flow_stats_cache == NULL) {
724 kmem_cache_destroy(flow_cache); 724 kmem_cache_destroy(flow_cache);
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index 8d54f3047768..e2742b006d25 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -2618,6 +2618,13 @@ static int tpacket_snd(struct packet_sock *po, struct msghdr *msg)
2618 2618
2619 mutex_lock(&po->pg_vec_lock); 2619 mutex_lock(&po->pg_vec_lock);
2620 2620
2621 /* packet_sendmsg() check on tx_ring.pg_vec was lockless,
2622 * we need to confirm it under protection of pg_vec_lock.
2623 */
2624 if (unlikely(!po->tx_ring.pg_vec)) {
2625 err = -EBUSY;
2626 goto out;
2627 }
2621 if (likely(saddr == NULL)) { 2628 if (likely(saddr == NULL)) {
2622 dev = packet_cached_dev_get(po); 2629 dev = packet_cached_dev_get(po);
2623 proto = po->num; 2630 proto = po->num;
diff --git a/net/psample/psample.c b/net/psample/psample.c
index 841f198ea1a8..66e4b61a350d 100644
--- a/net/psample/psample.c
+++ b/net/psample/psample.c
@@ -154,7 +154,7 @@ static void psample_group_destroy(struct psample_group *group)
154{ 154{
155 psample_group_notify(group, PSAMPLE_CMD_DEL_GROUP); 155 psample_group_notify(group, PSAMPLE_CMD_DEL_GROUP);
156 list_del(&group->list); 156 list_del(&group->list);
157 kfree(group); 157 kfree_rcu(group, rcu);
158} 158}
159 159
160static struct psample_group * 160static struct psample_group *
diff --git a/net/rds/ib.c b/net/rds/ib.c
index ec05d91aa9a2..45acab2de0cf 100644
--- a/net/rds/ib.c
+++ b/net/rds/ib.c
@@ -291,7 +291,7 @@ static int rds_ib_conn_info_visitor(struct rds_connection *conn,
291 void *buffer) 291 void *buffer)
292{ 292{
293 struct rds_info_rdma_connection *iinfo = buffer; 293 struct rds_info_rdma_connection *iinfo = buffer;
294 struct rds_ib_connection *ic; 294 struct rds_ib_connection *ic = conn->c_transport_data;
295 295
296 /* We will only ever look at IB transports */ 296 /* We will only ever look at IB transports */
297 if (conn->c_trans != &rds_ib_transport) 297 if (conn->c_trans != &rds_ib_transport)
@@ -301,15 +301,16 @@ static int rds_ib_conn_info_visitor(struct rds_connection *conn,
301 301
302 iinfo->src_addr = conn->c_laddr.s6_addr32[3]; 302 iinfo->src_addr = conn->c_laddr.s6_addr32[3];
303 iinfo->dst_addr = conn->c_faddr.s6_addr32[3]; 303 iinfo->dst_addr = conn->c_faddr.s6_addr32[3];
304 iinfo->tos = conn->c_tos; 304 if (ic) {
305 iinfo->tos = conn->c_tos;
306 iinfo->sl = ic->i_sl;
307 }
305 308
306 memset(&iinfo->src_gid, 0, sizeof(iinfo->src_gid)); 309 memset(&iinfo->src_gid, 0, sizeof(iinfo->src_gid));
307 memset(&iinfo->dst_gid, 0, sizeof(iinfo->dst_gid)); 310 memset(&iinfo->dst_gid, 0, sizeof(iinfo->dst_gid));
308 if (rds_conn_state(conn) == RDS_CONN_UP) { 311 if (rds_conn_state(conn) == RDS_CONN_UP) {
309 struct rds_ib_device *rds_ibdev; 312 struct rds_ib_device *rds_ibdev;
310 313
311 ic = conn->c_transport_data;
312
313 rdma_read_gids(ic->i_cm_id, (union ib_gid *)&iinfo->src_gid, 314 rdma_read_gids(ic->i_cm_id, (union ib_gid *)&iinfo->src_gid,
314 (union ib_gid *)&iinfo->dst_gid); 315 (union ib_gid *)&iinfo->dst_gid);
315 316
@@ -329,7 +330,7 @@ static int rds6_ib_conn_info_visitor(struct rds_connection *conn,
329 void *buffer) 330 void *buffer)
330{ 331{
331 struct rds6_info_rdma_connection *iinfo6 = buffer; 332 struct rds6_info_rdma_connection *iinfo6 = buffer;
332 struct rds_ib_connection *ic; 333 struct rds_ib_connection *ic = conn->c_transport_data;
333 334
334 /* We will only ever look at IB transports */ 335 /* We will only ever look at IB transports */
335 if (conn->c_trans != &rds_ib_transport) 336 if (conn->c_trans != &rds_ib_transport)
@@ -337,6 +338,10 @@ static int rds6_ib_conn_info_visitor(struct rds_connection *conn,
337 338
338 iinfo6->src_addr = conn->c_laddr; 339 iinfo6->src_addr = conn->c_laddr;
339 iinfo6->dst_addr = conn->c_faddr; 340 iinfo6->dst_addr = conn->c_faddr;
341 if (ic) {
342 iinfo6->tos = conn->c_tos;
343 iinfo6->sl = ic->i_sl;
344 }
340 345
341 memset(&iinfo6->src_gid, 0, sizeof(iinfo6->src_gid)); 346 memset(&iinfo6->src_gid, 0, sizeof(iinfo6->src_gid));
342 memset(&iinfo6->dst_gid, 0, sizeof(iinfo6->dst_gid)); 347 memset(&iinfo6->dst_gid, 0, sizeof(iinfo6->dst_gid));
@@ -344,7 +349,6 @@ static int rds6_ib_conn_info_visitor(struct rds_connection *conn,
344 if (rds_conn_state(conn) == RDS_CONN_UP) { 349 if (rds_conn_state(conn) == RDS_CONN_UP) {
345 struct rds_ib_device *rds_ibdev; 350 struct rds_ib_device *rds_ibdev;
346 351
347 ic = conn->c_transport_data;
348 rdma_read_gids(ic->i_cm_id, (union ib_gid *)&iinfo6->src_gid, 352 rdma_read_gids(ic->i_cm_id, (union ib_gid *)&iinfo6->src_gid,
349 (union ib_gid *)&iinfo6->dst_gid); 353 (union ib_gid *)&iinfo6->dst_gid);
350 rds_ibdev = ic->rds_ibdev; 354 rds_ibdev = ic->rds_ibdev;
diff --git a/net/rds/ib.h b/net/rds/ib.h
index 303c6ee8bdb7..f2b558e8b5ea 100644
--- a/net/rds/ib.h
+++ b/net/rds/ib.h
@@ -220,6 +220,7 @@ struct rds_ib_connection {
220 /* Send/Recv vectors */ 220 /* Send/Recv vectors */
221 int i_scq_vector; 221 int i_scq_vector;
222 int i_rcq_vector; 222 int i_rcq_vector;
223 u8 i_sl;
223}; 224};
224 225
225/* This assumes that atomic_t is at least 32 bits */ 226/* This assumes that atomic_t is at least 32 bits */
diff --git a/net/rds/ib_cm.c b/net/rds/ib_cm.c
index fddaa09f7b0d..233f1368162b 100644
--- a/net/rds/ib_cm.c
+++ b/net/rds/ib_cm.c
@@ -152,6 +152,9 @@ void rds_ib_cm_connect_complete(struct rds_connection *conn, struct rdma_cm_even
152 RDS_PROTOCOL_MINOR(conn->c_version), 152 RDS_PROTOCOL_MINOR(conn->c_version),
153 ic->i_flowctl ? ", flow control" : ""); 153 ic->i_flowctl ? ", flow control" : "");
154 154
155 /* receive sl from the peer */
156 ic->i_sl = ic->i_cm_id->route.path_rec->sl;
157
155 atomic_set(&ic->i_cq_quiesce, 0); 158 atomic_set(&ic->i_cq_quiesce, 0);
156 159
157 /* Init rings and fill recv. this needs to wait until protocol 160 /* Init rings and fill recv. this needs to wait until protocol
diff --git a/net/rds/rdma_transport.c b/net/rds/rdma_transport.c
index ff74c4bbb9fc..5f741e51b4ba 100644
--- a/net/rds/rdma_transport.c
+++ b/net/rds/rdma_transport.c
@@ -43,6 +43,9 @@ static struct rdma_cm_id *rds_rdma_listen_id;
43static struct rdma_cm_id *rds6_rdma_listen_id; 43static struct rdma_cm_id *rds6_rdma_listen_id;
44#endif 44#endif
45 45
46/* Per IB specification 7.7.3, service level is a 4-bit field. */
47#define TOS_TO_SL(tos) ((tos) & 0xF)
48
46static int rds_rdma_cm_event_handler_cmn(struct rdma_cm_id *cm_id, 49static int rds_rdma_cm_event_handler_cmn(struct rdma_cm_id *cm_id,
47 struct rdma_cm_event *event, 50 struct rdma_cm_event *event,
48 bool isv6) 51 bool isv6)
@@ -97,15 +100,19 @@ static int rds_rdma_cm_event_handler_cmn(struct rdma_cm_id *cm_id,
97 struct rds_ib_connection *ibic; 100 struct rds_ib_connection *ibic;
98 101
99 ibic = conn->c_transport_data; 102 ibic = conn->c_transport_data;
100 if (ibic && ibic->i_cm_id == cm_id) 103 if (ibic && ibic->i_cm_id == cm_id) {
104 cm_id->route.path_rec[0].sl =
105 TOS_TO_SL(conn->c_tos);
101 ret = trans->cm_initiate_connect(cm_id, isv6); 106 ret = trans->cm_initiate_connect(cm_id, isv6);
102 else 107 } else {
103 rds_conn_drop(conn); 108 rds_conn_drop(conn);
109 }
104 } 110 }
105 break; 111 break;
106 112
107 case RDMA_CM_EVENT_ESTABLISHED: 113 case RDMA_CM_EVENT_ESTABLISHED:
108 trans->cm_connect_complete(conn, event); 114 if (conn)
115 trans->cm_connect_complete(conn, event);
109 break; 116 break;
110 117
111 case RDMA_CM_EVENT_REJECTED: 118 case RDMA_CM_EVENT_REJECTED:
@@ -137,6 +144,8 @@ static int rds_rdma_cm_event_handler_cmn(struct rdma_cm_id *cm_id,
137 break; 144 break;
138 145
139 case RDMA_CM_EVENT_DISCONNECTED: 146 case RDMA_CM_EVENT_DISCONNECTED:
147 if (!conn)
148 break;
140 rdsdebug("DISCONNECT event - dropping connection " 149 rdsdebug("DISCONNECT event - dropping connection "
141 "%pI6c->%pI6c\n", &conn->c_laddr, 150 "%pI6c->%pI6c\n", &conn->c_laddr,
142 &conn->c_faddr); 151 &conn->c_faddr);
diff --git a/net/rds/recv.c b/net/rds/recv.c
index 853de4876088..a42ba7fa06d5 100644
--- a/net/rds/recv.c
+++ b/net/rds/recv.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2006, 2018 Oracle and/or its affiliates. All rights reserved. 2 * Copyright (c) 2006, 2019 Oracle and/or its affiliates. All rights reserved.
3 * 3 *
4 * This software is available to you under a choice of one of two 4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU 5 * licenses. You may choose to be licensed under the terms of the GNU
@@ -811,6 +811,7 @@ void rds6_inc_info_copy(struct rds_incoming *inc,
811 811
812 minfo6.seq = be64_to_cpu(inc->i_hdr.h_sequence); 812 minfo6.seq = be64_to_cpu(inc->i_hdr.h_sequence);
813 minfo6.len = be32_to_cpu(inc->i_hdr.h_len); 813 minfo6.len = be32_to_cpu(inc->i_hdr.h_len);
814 minfo6.tos = inc->i_conn->c_tos;
814 815
815 if (flip) { 816 if (flip) {
816 minfo6.laddr = *daddr; 817 minfo6.laddr = *daddr;
@@ -824,6 +825,8 @@ void rds6_inc_info_copy(struct rds_incoming *inc,
824 minfo6.fport = inc->i_hdr.h_dport; 825 minfo6.fport = inc->i_hdr.h_dport;
825 } 826 }
826 827
828 minfo6.flags = 0;
829
827 rds_info_copy(iter, &minfo6, sizeof(minfo6)); 830 rds_info_copy(iter, &minfo6, sizeof(minfo6));
828} 831}
829#endif 832#endif
diff --git a/net/rxrpc/af_rxrpc.c b/net/rxrpc/af_rxrpc.c
index d09eaf153544..d72ddb67bb74 100644
--- a/net/rxrpc/af_rxrpc.c
+++ b/net/rxrpc/af_rxrpc.c
@@ -193,7 +193,7 @@ static int rxrpc_bind(struct socket *sock, struct sockaddr *saddr, int len)
193 193
194service_in_use: 194service_in_use:
195 write_unlock(&local->services_lock); 195 write_unlock(&local->services_lock);
196 rxrpc_put_local(local); 196 rxrpc_unuse_local(local);
197 ret = -EADDRINUSE; 197 ret = -EADDRINUSE;
198error_unlock: 198error_unlock:
199 release_sock(&rx->sk); 199 release_sock(&rx->sk);
@@ -402,7 +402,7 @@ EXPORT_SYMBOL(rxrpc_kernel_check_life);
402 */ 402 */
403void rxrpc_kernel_probe_life(struct socket *sock, struct rxrpc_call *call) 403void rxrpc_kernel_probe_life(struct socket *sock, struct rxrpc_call *call)
404{ 404{
405 rxrpc_propose_ACK(call, RXRPC_ACK_PING, 0, 0, true, false, 405 rxrpc_propose_ACK(call, RXRPC_ACK_PING, 0, true, false,
406 rxrpc_propose_ack_ping_for_check_life); 406 rxrpc_propose_ack_ping_for_check_life);
407 rxrpc_send_ack_packet(call, true, NULL); 407 rxrpc_send_ack_packet(call, true, NULL);
408} 408}
@@ -862,7 +862,6 @@ static void rxrpc_sock_destructor(struct sock *sk)
862static int rxrpc_release_sock(struct sock *sk) 862static int rxrpc_release_sock(struct sock *sk)
863{ 863{
864 struct rxrpc_sock *rx = rxrpc_sk(sk); 864 struct rxrpc_sock *rx = rxrpc_sk(sk);
865 struct rxrpc_net *rxnet = rxrpc_net(sock_net(&rx->sk));
866 865
867 _enter("%p{%d,%d}", sk, sk->sk_state, refcount_read(&sk->sk_refcnt)); 866 _enter("%p{%d,%d}", sk, sk->sk_state, refcount_read(&sk->sk_refcnt));
868 867
@@ -898,10 +897,8 @@ static int rxrpc_release_sock(struct sock *sk)
898 rxrpc_release_calls_on_socket(rx); 897 rxrpc_release_calls_on_socket(rx);
899 flush_workqueue(rxrpc_workqueue); 898 flush_workqueue(rxrpc_workqueue);
900 rxrpc_purge_queue(&sk->sk_receive_queue); 899 rxrpc_purge_queue(&sk->sk_receive_queue);
901 rxrpc_queue_work(&rxnet->service_conn_reaper);
902 rxrpc_queue_work(&rxnet->client_conn_reaper);
903 900
904 rxrpc_put_local(rx->local); 901 rxrpc_unuse_local(rx->local);
905 rx->local = NULL; 902 rx->local = NULL;
906 key_put(rx->key); 903 key_put(rx->key);
907 rx->key = NULL; 904 rx->key = NULL;
diff --git a/net/rxrpc/ar-internal.h b/net/rxrpc/ar-internal.h
index 80335b4ee4fd..8051dfdcf26d 100644
--- a/net/rxrpc/ar-internal.h
+++ b/net/rxrpc/ar-internal.h
@@ -185,11 +185,17 @@ struct rxrpc_host_header {
185 * - max 48 bytes (struct sk_buff::cb) 185 * - max 48 bytes (struct sk_buff::cb)
186 */ 186 */
187struct rxrpc_skb_priv { 187struct rxrpc_skb_priv {
188 union { 188 atomic_t nr_ring_pins; /* Number of rxtx ring pins */
189 u8 nr_jumbo; /* Number of jumbo subpackets */ 189 u8 nr_subpackets; /* Number of subpackets */
190 }; 190 u8 rx_flags; /* Received packet flags */
191#define RXRPC_SKB_INCL_LAST 0x01 /* - Includes last packet */
192#define RXRPC_SKB_TX_BUFFER 0x02 /* - Is transmit buffer */
191 union { 193 union {
192 int remain; /* amount of space remaining for next write */ 194 int remain; /* amount of space remaining for next write */
195
196 /* List of requested ACKs on subpackets */
197 unsigned long rx_req_ack[(RXRPC_MAX_NR_JUMBO + BITS_PER_LONG - 1) /
198 BITS_PER_LONG];
193 }; 199 };
194 200
195 struct rxrpc_host_header hdr; /* RxRPC packet header from this packet */ 201 struct rxrpc_host_header hdr; /* RxRPC packet header from this packet */
@@ -254,7 +260,8 @@ struct rxrpc_security {
254 */ 260 */
255struct rxrpc_local { 261struct rxrpc_local {
256 struct rcu_head rcu; 262 struct rcu_head rcu;
257 atomic_t usage; 263 atomic_t active_users; /* Number of users of the local endpoint */
264 atomic_t usage; /* Number of references to the structure */
258 struct rxrpc_net *rxnet; /* The network ns in which this resides */ 265 struct rxrpc_net *rxnet; /* The network ns in which this resides */
259 struct list_head link; 266 struct list_head link;
260 struct socket *socket; /* my UDP socket */ 267 struct socket *socket; /* my UDP socket */
@@ -612,8 +619,7 @@ struct rxrpc_call {
612#define RXRPC_TX_ANNO_LAST 0x04 619#define RXRPC_TX_ANNO_LAST 0x04
613#define RXRPC_TX_ANNO_RESENT 0x08 620#define RXRPC_TX_ANNO_RESENT 0x08
614 621
615#define RXRPC_RX_ANNO_JUMBO 0x3f /* Jumbo subpacket number + 1 if not zero */ 622#define RXRPC_RX_ANNO_SUBPACKET 0x3f /* Subpacket number in jumbogram */
616#define RXRPC_RX_ANNO_JLAST 0x40 /* Set if last element of a jumbo packet */
617#define RXRPC_RX_ANNO_VERIFIED 0x80 /* Set if verified and decrypted */ 623#define RXRPC_RX_ANNO_VERIFIED 0x80 /* Set if verified and decrypted */
618 rxrpc_seq_t tx_hard_ack; /* Dead slot in buffer; the first transmitted but 624 rxrpc_seq_t tx_hard_ack; /* Dead slot in buffer; the first transmitted but
619 * not hard-ACK'd packet follows this. 625 * not hard-ACK'd packet follows this.
@@ -649,7 +655,6 @@ struct rxrpc_call {
649 655
650 /* receive-phase ACK management */ 656 /* receive-phase ACK management */
651 u8 ackr_reason; /* reason to ACK */ 657 u8 ackr_reason; /* reason to ACK */
652 u16 ackr_skew; /* skew on packet being ACK'd */
653 rxrpc_serial_t ackr_serial; /* serial of packet being ACK'd */ 658 rxrpc_serial_t ackr_serial; /* serial of packet being ACK'd */
654 rxrpc_serial_t ackr_first_seq; /* first sequence number received */ 659 rxrpc_serial_t ackr_first_seq; /* first sequence number received */
655 rxrpc_seq_t ackr_prev_seq; /* previous sequence number received */ 660 rxrpc_seq_t ackr_prev_seq; /* previous sequence number received */
@@ -743,7 +748,7 @@ int rxrpc_reject_call(struct rxrpc_sock *);
743/* 748/*
744 * call_event.c 749 * call_event.c
745 */ 750 */
746void rxrpc_propose_ACK(struct rxrpc_call *, u8, u16, u32, bool, bool, 751void rxrpc_propose_ACK(struct rxrpc_call *, u8, u32, bool, bool,
747 enum rxrpc_propose_ack_trace); 752 enum rxrpc_propose_ack_trace);
748void rxrpc_process_call(struct work_struct *); 753void rxrpc_process_call(struct work_struct *);
749 754
@@ -905,6 +910,7 @@ void rxrpc_disconnect_client_call(struct rxrpc_call *);
905void rxrpc_put_client_conn(struct rxrpc_connection *); 910void rxrpc_put_client_conn(struct rxrpc_connection *);
906void rxrpc_discard_expired_client_conns(struct work_struct *); 911void rxrpc_discard_expired_client_conns(struct work_struct *);
907void rxrpc_destroy_all_client_connections(struct rxrpc_net *); 912void rxrpc_destroy_all_client_connections(struct rxrpc_net *);
913void rxrpc_clean_up_local_conns(struct rxrpc_local *);
908 914
909/* 915/*
910 * conn_event.c 916 * conn_event.c
@@ -1002,6 +1008,8 @@ struct rxrpc_local *rxrpc_lookup_local(struct net *, const struct sockaddr_rxrpc
1002struct rxrpc_local *rxrpc_get_local(struct rxrpc_local *); 1008struct rxrpc_local *rxrpc_get_local(struct rxrpc_local *);
1003struct rxrpc_local *rxrpc_get_local_maybe(struct rxrpc_local *); 1009struct rxrpc_local *rxrpc_get_local_maybe(struct rxrpc_local *);
1004void rxrpc_put_local(struct rxrpc_local *); 1010void rxrpc_put_local(struct rxrpc_local *);
1011struct rxrpc_local *rxrpc_use_local(struct rxrpc_local *);
1012void rxrpc_unuse_local(struct rxrpc_local *);
1005void rxrpc_queue_local(struct rxrpc_local *); 1013void rxrpc_queue_local(struct rxrpc_local *);
1006void rxrpc_destroy_all_locals(struct rxrpc_net *); 1014void rxrpc_destroy_all_locals(struct rxrpc_net *);
1007 1015
@@ -1061,6 +1069,7 @@ void rxrpc_destroy_all_peers(struct rxrpc_net *);
1061struct rxrpc_peer *rxrpc_get_peer(struct rxrpc_peer *); 1069struct rxrpc_peer *rxrpc_get_peer(struct rxrpc_peer *);
1062struct rxrpc_peer *rxrpc_get_peer_maybe(struct rxrpc_peer *); 1070struct rxrpc_peer *rxrpc_get_peer_maybe(struct rxrpc_peer *);
1063void rxrpc_put_peer(struct rxrpc_peer *); 1071void rxrpc_put_peer(struct rxrpc_peer *);
1072void rxrpc_put_peer_locked(struct rxrpc_peer *);
1064 1073
1065/* 1074/*
1066 * proc.c 1075 * proc.c
@@ -1102,6 +1111,7 @@ void rxrpc_kernel_data_consumed(struct rxrpc_call *, struct sk_buff *);
1102void rxrpc_packet_destructor(struct sk_buff *); 1111void rxrpc_packet_destructor(struct sk_buff *);
1103void rxrpc_new_skb(struct sk_buff *, enum rxrpc_skb_trace); 1112void rxrpc_new_skb(struct sk_buff *, enum rxrpc_skb_trace);
1104void rxrpc_see_skb(struct sk_buff *, enum rxrpc_skb_trace); 1113void rxrpc_see_skb(struct sk_buff *, enum rxrpc_skb_trace);
1114void rxrpc_eaten_skb(struct sk_buff *, enum rxrpc_skb_trace);
1105void rxrpc_get_skb(struct sk_buff *, enum rxrpc_skb_trace); 1115void rxrpc_get_skb(struct sk_buff *, enum rxrpc_skb_trace);
1106void rxrpc_free_skb(struct sk_buff *, enum rxrpc_skb_trace); 1116void rxrpc_free_skb(struct sk_buff *, enum rxrpc_skb_trace);
1107void rxrpc_purge_queue(struct sk_buff_head *); 1117void rxrpc_purge_queue(struct sk_buff_head *);
diff --git a/net/rxrpc/call_event.c b/net/rxrpc/call_event.c
index bc2adeb3acb9..cedbbb3a7c2e 100644
--- a/net/rxrpc/call_event.c
+++ b/net/rxrpc/call_event.c
@@ -43,8 +43,7 @@ static void rxrpc_propose_ping(struct rxrpc_call *call,
43 * propose an ACK be sent 43 * propose an ACK be sent
44 */ 44 */
45static void __rxrpc_propose_ACK(struct rxrpc_call *call, u8 ack_reason, 45static void __rxrpc_propose_ACK(struct rxrpc_call *call, u8 ack_reason,
46 u16 skew, u32 serial, bool immediate, 46 u32 serial, bool immediate, bool background,
47 bool background,
48 enum rxrpc_propose_ack_trace why) 47 enum rxrpc_propose_ack_trace why)
49{ 48{
50 enum rxrpc_propose_ack_outcome outcome = rxrpc_propose_ack_use; 49 enum rxrpc_propose_ack_outcome outcome = rxrpc_propose_ack_use;
@@ -69,14 +68,12 @@ static void __rxrpc_propose_ACK(struct rxrpc_call *call, u8 ack_reason,
69 if (RXRPC_ACK_UPDATEABLE & (1 << ack_reason)) { 68 if (RXRPC_ACK_UPDATEABLE & (1 << ack_reason)) {
70 outcome = rxrpc_propose_ack_update; 69 outcome = rxrpc_propose_ack_update;
71 call->ackr_serial = serial; 70 call->ackr_serial = serial;
72 call->ackr_skew = skew;
73 } 71 }
74 if (!immediate) 72 if (!immediate)
75 goto trace; 73 goto trace;
76 } else if (prior > rxrpc_ack_priority[call->ackr_reason]) { 74 } else if (prior > rxrpc_ack_priority[call->ackr_reason]) {
77 call->ackr_reason = ack_reason; 75 call->ackr_reason = ack_reason;
78 call->ackr_serial = serial; 76 call->ackr_serial = serial;
79 call->ackr_skew = skew;
80 } else { 77 } else {
81 outcome = rxrpc_propose_ack_subsume; 78 outcome = rxrpc_propose_ack_subsume;
82 } 79 }
@@ -137,11 +134,11 @@ trace:
137 * propose an ACK be sent, locking the call structure 134 * propose an ACK be sent, locking the call structure
138 */ 135 */
139void rxrpc_propose_ACK(struct rxrpc_call *call, u8 ack_reason, 136void rxrpc_propose_ACK(struct rxrpc_call *call, u8 ack_reason,
140 u16 skew, u32 serial, bool immediate, bool background, 137 u32 serial, bool immediate, bool background,
141 enum rxrpc_propose_ack_trace why) 138 enum rxrpc_propose_ack_trace why)
142{ 139{
143 spin_lock_bh(&call->lock); 140 spin_lock_bh(&call->lock);
144 __rxrpc_propose_ACK(call, ack_reason, skew, serial, 141 __rxrpc_propose_ACK(call, ack_reason, serial,
145 immediate, background, why); 142 immediate, background, why);
146 spin_unlock_bh(&call->lock); 143 spin_unlock_bh(&call->lock);
147} 144}
@@ -202,7 +199,7 @@ static void rxrpc_resend(struct rxrpc_call *call, unsigned long now_j)
202 continue; 199 continue;
203 200
204 skb = call->rxtx_buffer[ix]; 201 skb = call->rxtx_buffer[ix];
205 rxrpc_see_skb(skb, rxrpc_skb_tx_seen); 202 rxrpc_see_skb(skb, rxrpc_skb_seen);
206 203
207 if (anno_type == RXRPC_TX_ANNO_UNACK) { 204 if (anno_type == RXRPC_TX_ANNO_UNACK) {
208 if (ktime_after(skb->tstamp, max_age)) { 205 if (ktime_after(skb->tstamp, max_age)) {
@@ -239,7 +236,7 @@ static void rxrpc_resend(struct rxrpc_call *call, unsigned long now_j)
239 ack_ts = ktime_sub(now, call->acks_latest_ts); 236 ack_ts = ktime_sub(now, call->acks_latest_ts);
240 if (ktime_to_ns(ack_ts) < call->peer->rtt) 237 if (ktime_to_ns(ack_ts) < call->peer->rtt)
241 goto out; 238 goto out;
242 rxrpc_propose_ACK(call, RXRPC_ACK_PING, 0, 0, true, false, 239 rxrpc_propose_ACK(call, RXRPC_ACK_PING, 0, true, false,
243 rxrpc_propose_ack_ping_for_lost_ack); 240 rxrpc_propose_ack_ping_for_lost_ack);
244 rxrpc_send_ack_packet(call, true, NULL); 241 rxrpc_send_ack_packet(call, true, NULL);
245 goto out; 242 goto out;
@@ -258,18 +255,18 @@ static void rxrpc_resend(struct rxrpc_call *call, unsigned long now_j)
258 continue; 255 continue;
259 256
260 skb = call->rxtx_buffer[ix]; 257 skb = call->rxtx_buffer[ix];
261 rxrpc_get_skb(skb, rxrpc_skb_tx_got); 258 rxrpc_get_skb(skb, rxrpc_skb_got);
262 spin_unlock_bh(&call->lock); 259 spin_unlock_bh(&call->lock);
263 260
264 if (rxrpc_send_data_packet(call, skb, true) < 0) { 261 if (rxrpc_send_data_packet(call, skb, true) < 0) {
265 rxrpc_free_skb(skb, rxrpc_skb_tx_freed); 262 rxrpc_free_skb(skb, rxrpc_skb_freed);
266 return; 263 return;
267 } 264 }
268 265
269 if (rxrpc_is_client_call(call)) 266 if (rxrpc_is_client_call(call))
270 rxrpc_expose_client_call(call); 267 rxrpc_expose_client_call(call);
271 268
272 rxrpc_free_skb(skb, rxrpc_skb_tx_freed); 269 rxrpc_free_skb(skb, rxrpc_skb_freed);
273 spin_lock_bh(&call->lock); 270 spin_lock_bh(&call->lock);
274 271
275 /* We need to clear the retransmit state, but there are two 272 /* We need to clear the retransmit state, but there are two
@@ -372,7 +369,7 @@ recheck_state:
372 if (time_after_eq(now, t)) { 369 if (time_after_eq(now, t)) {
373 trace_rxrpc_timer(call, rxrpc_timer_exp_keepalive, now); 370 trace_rxrpc_timer(call, rxrpc_timer_exp_keepalive, now);
374 cmpxchg(&call->keepalive_at, t, now + MAX_JIFFY_OFFSET); 371 cmpxchg(&call->keepalive_at, t, now + MAX_JIFFY_OFFSET);
375 rxrpc_propose_ACK(call, RXRPC_ACK_PING, 0, 0, true, true, 372 rxrpc_propose_ACK(call, RXRPC_ACK_PING, 0, true, true,
376 rxrpc_propose_ack_ping_for_keepalive); 373 rxrpc_propose_ack_ping_for_keepalive);
377 set_bit(RXRPC_CALL_EV_PING, &call->events); 374 set_bit(RXRPC_CALL_EV_PING, &call->events);
378 } 375 }
@@ -407,7 +404,7 @@ recheck_state:
407 send_ack = NULL; 404 send_ack = NULL;
408 if (test_and_clear_bit(RXRPC_CALL_EV_ACK_LOST, &call->events)) { 405 if (test_and_clear_bit(RXRPC_CALL_EV_ACK_LOST, &call->events)) {
409 call->acks_lost_top = call->tx_top; 406 call->acks_lost_top = call->tx_top;
410 rxrpc_propose_ACK(call, RXRPC_ACK_PING, 0, 0, true, false, 407 rxrpc_propose_ACK(call, RXRPC_ACK_PING, 0, true, false,
411 rxrpc_propose_ack_ping_for_lost_ack); 408 rxrpc_propose_ack_ping_for_lost_ack);
412 send_ack = &call->acks_lost_ping; 409 send_ack = &call->acks_lost_ping;
413 } 410 }
diff --git a/net/rxrpc/call_object.c b/net/rxrpc/call_object.c
index 217b12be9e08..014548c259ce 100644
--- a/net/rxrpc/call_object.c
+++ b/net/rxrpc/call_object.c
@@ -422,6 +422,19 @@ void rxrpc_get_call(struct rxrpc_call *call, enum rxrpc_call_trace op)
422} 422}
423 423
424/* 424/*
425 * Clean up the RxTx skb ring.
426 */
427static void rxrpc_cleanup_ring(struct rxrpc_call *call)
428{
429 int i;
430
431 for (i = 0; i < RXRPC_RXTX_BUFF_SIZE; i++) {
432 rxrpc_free_skb(call->rxtx_buffer[i], rxrpc_skb_cleaned);
433 call->rxtx_buffer[i] = NULL;
434 }
435}
436
437/*
425 * Detach a call from its owning socket. 438 * Detach a call from its owning socket.
426 */ 439 */
427void rxrpc_release_call(struct rxrpc_sock *rx, struct rxrpc_call *call) 440void rxrpc_release_call(struct rxrpc_sock *rx, struct rxrpc_call *call)
@@ -429,7 +442,6 @@ void rxrpc_release_call(struct rxrpc_sock *rx, struct rxrpc_call *call)
429 const void *here = __builtin_return_address(0); 442 const void *here = __builtin_return_address(0);
430 struct rxrpc_connection *conn = call->conn; 443 struct rxrpc_connection *conn = call->conn;
431 bool put = false; 444 bool put = false;
432 int i;
433 445
434 _enter("{%d,%d}", call->debug_id, atomic_read(&call->usage)); 446 _enter("{%d,%d}", call->debug_id, atomic_read(&call->usage));
435 447
@@ -479,13 +491,7 @@ void rxrpc_release_call(struct rxrpc_sock *rx, struct rxrpc_call *call)
479 if (conn) 491 if (conn)
480 rxrpc_disconnect_call(call); 492 rxrpc_disconnect_call(call);
481 493
482 for (i = 0; i < RXRPC_RXTX_BUFF_SIZE; i++) { 494 rxrpc_cleanup_ring(call);
483 rxrpc_free_skb(call->rxtx_buffer[i],
484 (call->tx_phase ? rxrpc_skb_tx_cleaned :
485 rxrpc_skb_rx_cleaned));
486 call->rxtx_buffer[i] = NULL;
487 }
488
489 _leave(""); 495 _leave("");
490} 496}
491 497
@@ -568,8 +574,6 @@ static void rxrpc_rcu_destroy_call(struct rcu_head *rcu)
568 */ 574 */
569void rxrpc_cleanup_call(struct rxrpc_call *call) 575void rxrpc_cleanup_call(struct rxrpc_call *call)
570{ 576{
571 int i;
572
573 _net("DESTROY CALL %d", call->debug_id); 577 _net("DESTROY CALL %d", call->debug_id);
574 578
575 memset(&call->sock_node, 0xcd, sizeof(call->sock_node)); 579 memset(&call->sock_node, 0xcd, sizeof(call->sock_node));
@@ -580,13 +584,8 @@ void rxrpc_cleanup_call(struct rxrpc_call *call)
580 ASSERT(test_bit(RXRPC_CALL_RELEASED, &call->flags)); 584 ASSERT(test_bit(RXRPC_CALL_RELEASED, &call->flags));
581 ASSERTCMP(call->conn, ==, NULL); 585 ASSERTCMP(call->conn, ==, NULL);
582 586
583 /* Clean up the Rx/Tx buffer */ 587 rxrpc_cleanup_ring(call);
584 for (i = 0; i < RXRPC_RXTX_BUFF_SIZE; i++) 588 rxrpc_free_skb(call->tx_pending, rxrpc_skb_cleaned);
585 rxrpc_free_skb(call->rxtx_buffer[i],
586 (call->tx_phase ? rxrpc_skb_tx_cleaned :
587 rxrpc_skb_rx_cleaned));
588
589 rxrpc_free_skb(call->tx_pending, rxrpc_skb_tx_cleaned);
590 589
591 call_rcu(&call->rcu, rxrpc_rcu_destroy_call); 590 call_rcu(&call->rcu, rxrpc_rcu_destroy_call);
592} 591}
diff --git a/net/rxrpc/conn_client.c b/net/rxrpc/conn_client.c
index aea82f909c60..3f1da1b49f69 100644
--- a/net/rxrpc/conn_client.c
+++ b/net/rxrpc/conn_client.c
@@ -1162,3 +1162,47 @@ void rxrpc_destroy_all_client_connections(struct rxrpc_net *rxnet)
1162 1162
1163 _leave(""); 1163 _leave("");
1164} 1164}
1165
1166/*
1167 * Clean up the client connections on a local endpoint.
1168 */
1169void rxrpc_clean_up_local_conns(struct rxrpc_local *local)
1170{
1171 struct rxrpc_connection *conn, *tmp;
1172 struct rxrpc_net *rxnet = local->rxnet;
1173 unsigned int nr_active;
1174 LIST_HEAD(graveyard);
1175
1176 _enter("");
1177
1178 spin_lock(&rxnet->client_conn_cache_lock);
1179 nr_active = rxnet->nr_active_client_conns;
1180
1181 list_for_each_entry_safe(conn, tmp, &rxnet->idle_client_conns,
1182 cache_link) {
1183 if (conn->params.local == local) {
1184 ASSERTCMP(conn->cache_state, ==, RXRPC_CONN_CLIENT_IDLE);
1185
1186 trace_rxrpc_client(conn, -1, rxrpc_client_discard);
1187 if (!test_and_clear_bit(RXRPC_CONN_EXPOSED, &conn->flags))
1188 BUG();
1189 conn->cache_state = RXRPC_CONN_CLIENT_INACTIVE;
1190 list_move(&conn->cache_link, &graveyard);
1191 nr_active--;
1192 }
1193 }
1194
1195 rxnet->nr_active_client_conns = nr_active;
1196 spin_unlock(&rxnet->client_conn_cache_lock);
1197 ASSERTCMP(nr_active, >=, 0);
1198
1199 while (!list_empty(&graveyard)) {
1200 conn = list_entry(graveyard.next,
1201 struct rxrpc_connection, cache_link);
1202 list_del_init(&conn->cache_link);
1203
1204 rxrpc_put_connection(conn);
1205 }
1206
1207 _leave(" [culled]");
1208}
diff --git a/net/rxrpc/conn_event.c b/net/rxrpc/conn_event.c
index df6624c140be..a1ceef4f5cd0 100644
--- a/net/rxrpc/conn_event.c
+++ b/net/rxrpc/conn_event.c
@@ -472,7 +472,7 @@ void rxrpc_process_connection(struct work_struct *work)
472 /* go through the conn-level event packets, releasing the ref on this 472 /* go through the conn-level event packets, releasing the ref on this
473 * connection that each one has when we've finished with it */ 473 * connection that each one has when we've finished with it */
474 while ((skb = skb_dequeue(&conn->rx_queue))) { 474 while ((skb = skb_dequeue(&conn->rx_queue))) {
475 rxrpc_see_skb(skb, rxrpc_skb_rx_seen); 475 rxrpc_see_skb(skb, rxrpc_skb_seen);
476 ret = rxrpc_process_event(conn, skb, &abort_code); 476 ret = rxrpc_process_event(conn, skb, &abort_code);
477 switch (ret) { 477 switch (ret) {
478 case -EPROTO: 478 case -EPROTO:
@@ -484,7 +484,7 @@ void rxrpc_process_connection(struct work_struct *work)
484 goto requeue_and_leave; 484 goto requeue_and_leave;
485 case -ECONNABORTED: 485 case -ECONNABORTED:
486 default: 486 default:
487 rxrpc_free_skb(skb, rxrpc_skb_rx_freed); 487 rxrpc_free_skb(skb, rxrpc_skb_freed);
488 break; 488 break;
489 } 489 }
490 } 490 }
@@ -501,6 +501,6 @@ requeue_and_leave:
501protocol_error: 501protocol_error:
502 if (rxrpc_abort_connection(conn, ret, abort_code) < 0) 502 if (rxrpc_abort_connection(conn, ret, abort_code) < 0)
503 goto requeue_and_leave; 503 goto requeue_and_leave;
504 rxrpc_free_skb(skb, rxrpc_skb_rx_freed); 504 rxrpc_free_skb(skb, rxrpc_skb_freed);
505 goto out; 505 goto out;
506} 506}
diff --git a/net/rxrpc/conn_object.c b/net/rxrpc/conn_object.c
index 434ef392212b..ed05b6922132 100644
--- a/net/rxrpc/conn_object.c
+++ b/net/rxrpc/conn_object.c
@@ -398,7 +398,7 @@ void rxrpc_service_connection_reaper(struct work_struct *work)
398 if (conn->state == RXRPC_CONN_SERVICE_PREALLOC) 398 if (conn->state == RXRPC_CONN_SERVICE_PREALLOC)
399 continue; 399 continue;
400 400
401 if (rxnet->live) { 401 if (rxnet->live && !conn->params.local->dead) {
402 idle_timestamp = READ_ONCE(conn->idle_timestamp); 402 idle_timestamp = READ_ONCE(conn->idle_timestamp);
403 expire_at = idle_timestamp + rxrpc_connection_expiry * HZ; 403 expire_at = idle_timestamp + rxrpc_connection_expiry * HZ;
404 if (conn->params.local->service_closed) 404 if (conn->params.local->service_closed)
diff --git a/net/rxrpc/input.c b/net/rxrpc/input.c
index 5bd6f1546e5c..d122c53c8697 100644
--- a/net/rxrpc/input.c
+++ b/net/rxrpc/input.c
@@ -196,15 +196,14 @@ send_extra_data:
196 * Ping the other end to fill our RTT cache and to retrieve the rwind 196 * Ping the other end to fill our RTT cache and to retrieve the rwind
197 * and MTU parameters. 197 * and MTU parameters.
198 */ 198 */
199static void rxrpc_send_ping(struct rxrpc_call *call, struct sk_buff *skb, 199static void rxrpc_send_ping(struct rxrpc_call *call, struct sk_buff *skb)
200 int skew)
201{ 200{
202 struct rxrpc_skb_priv *sp = rxrpc_skb(skb); 201 struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
203 ktime_t now = skb->tstamp; 202 ktime_t now = skb->tstamp;
204 203
205 if (call->peer->rtt_usage < 3 || 204 if (call->peer->rtt_usage < 3 ||
206 ktime_before(ktime_add_ms(call->peer->rtt_last_req, 1000), now)) 205 ktime_before(ktime_add_ms(call->peer->rtt_last_req, 1000), now))
207 rxrpc_propose_ACK(call, RXRPC_ACK_PING, skew, sp->hdr.serial, 206 rxrpc_propose_ACK(call, RXRPC_ACK_PING, sp->hdr.serial,
208 true, true, 207 true, true,
209 rxrpc_propose_ack_ping_for_params); 208 rxrpc_propose_ack_ping_for_params);
210} 209}
@@ -234,7 +233,7 @@ static bool rxrpc_rotate_tx_window(struct rxrpc_call *call, rxrpc_seq_t to,
234 ix = call->tx_hard_ack & RXRPC_RXTX_BUFF_MASK; 233 ix = call->tx_hard_ack & RXRPC_RXTX_BUFF_MASK;
235 skb = call->rxtx_buffer[ix]; 234 skb = call->rxtx_buffer[ix];
236 annotation = call->rxtx_annotations[ix]; 235 annotation = call->rxtx_annotations[ix];
237 rxrpc_see_skb(skb, rxrpc_skb_tx_rotated); 236 rxrpc_see_skb(skb, rxrpc_skb_rotated);
238 call->rxtx_buffer[ix] = NULL; 237 call->rxtx_buffer[ix] = NULL;
239 call->rxtx_annotations[ix] = 0; 238 call->rxtx_annotations[ix] = 0;
240 skb->next = list; 239 skb->next = list;
@@ -259,7 +258,7 @@ static bool rxrpc_rotate_tx_window(struct rxrpc_call *call, rxrpc_seq_t to,
259 skb = list; 258 skb = list;
260 list = skb->next; 259 list = skb->next;
261 skb_mark_not_on_list(skb); 260 skb_mark_not_on_list(skb);
262 rxrpc_free_skb(skb, rxrpc_skb_tx_freed); 261 rxrpc_free_skb(skb, rxrpc_skb_freed);
263 } 262 }
264 263
265 return rot_last; 264 return rot_last;
@@ -348,7 +347,7 @@ static bool rxrpc_receiving_reply(struct rxrpc_call *call)
348} 347}
349 348
350/* 349/*
351 * Scan a jumbo packet to validate its structure and to work out how many 350 * Scan a data packet to validate its structure and to work out how many
352 * subpackets it contains. 351 * subpackets it contains.
353 * 352 *
354 * A jumbo packet is a collection of consecutive packets glued together with 353 * A jumbo packet is a collection of consecutive packets glued together with
@@ -359,16 +358,21 @@ static bool rxrpc_receiving_reply(struct rxrpc_call *call)
359 * the last are RXRPC_JUMBO_DATALEN in size. The last subpacket may be of any 358 * the last are RXRPC_JUMBO_DATALEN in size. The last subpacket may be of any
360 * size. 359 * size.
361 */ 360 */
362static bool rxrpc_validate_jumbo(struct sk_buff *skb) 361static bool rxrpc_validate_data(struct sk_buff *skb)
363{ 362{
364 struct rxrpc_skb_priv *sp = rxrpc_skb(skb); 363 struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
365 unsigned int offset = sizeof(struct rxrpc_wire_header); 364 unsigned int offset = sizeof(struct rxrpc_wire_header);
366 unsigned int len = skb->len; 365 unsigned int len = skb->len;
367 int nr_jumbo = 1;
368 u8 flags = sp->hdr.flags; 366 u8 flags = sp->hdr.flags;
369 367
370 do { 368 for (;;) {
371 nr_jumbo++; 369 if (flags & RXRPC_REQUEST_ACK)
370 __set_bit(sp->nr_subpackets, sp->rx_req_ack);
371 sp->nr_subpackets++;
372
373 if (!(flags & RXRPC_JUMBO_PACKET))
374 break;
375
372 if (len - offset < RXRPC_JUMBO_SUBPKTLEN) 376 if (len - offset < RXRPC_JUMBO_SUBPKTLEN)
373 goto protocol_error; 377 goto protocol_error;
374 if (flags & RXRPC_LAST_PACKET) 378 if (flags & RXRPC_LAST_PACKET)
@@ -377,9 +381,10 @@ static bool rxrpc_validate_jumbo(struct sk_buff *skb)
377 if (skb_copy_bits(skb, offset, &flags, 1) < 0) 381 if (skb_copy_bits(skb, offset, &flags, 1) < 0)
378 goto protocol_error; 382 goto protocol_error;
379 offset += sizeof(struct rxrpc_jumbo_header); 383 offset += sizeof(struct rxrpc_jumbo_header);
380 } while (flags & RXRPC_JUMBO_PACKET); 384 }
381 385
382 sp->nr_jumbo = nr_jumbo; 386 if (flags & RXRPC_LAST_PACKET)
387 sp->rx_flags |= RXRPC_SKB_INCL_LAST;
383 return true; 388 return true;
384 389
385protocol_error: 390protocol_error:
@@ -400,10 +405,10 @@ protocol_error:
400 * (that information is encoded in the ACK packet). 405 * (that information is encoded in the ACK packet).
401 */ 406 */
402static void rxrpc_input_dup_data(struct rxrpc_call *call, rxrpc_seq_t seq, 407static void rxrpc_input_dup_data(struct rxrpc_call *call, rxrpc_seq_t seq,
403 u8 annotation, bool *_jumbo_bad) 408 bool is_jumbo, bool *_jumbo_bad)
404{ 409{
405 /* Discard normal packets that are duplicates. */ 410 /* Discard normal packets that are duplicates. */
406 if (annotation == 0) 411 if (is_jumbo)
407 return; 412 return;
408 413
409 /* Skip jumbo subpackets that are duplicates. When we've had three or 414 /* Skip jumbo subpackets that are duplicates. When we've had three or
@@ -417,30 +422,30 @@ static void rxrpc_input_dup_data(struct rxrpc_call *call, rxrpc_seq_t seq,
417} 422}
418 423
419/* 424/*
420 * Process a DATA packet, adding the packet to the Rx ring. 425 * Process a DATA packet, adding the packet to the Rx ring. The caller's
426 * packet ref must be passed on or discarded.
421 */ 427 */
422static void rxrpc_input_data(struct rxrpc_call *call, struct sk_buff *skb, 428static void rxrpc_input_data(struct rxrpc_call *call, struct sk_buff *skb)
423 u16 skew)
424{ 429{
425 struct rxrpc_skb_priv *sp = rxrpc_skb(skb); 430 struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
426 enum rxrpc_call_state state; 431 enum rxrpc_call_state state;
427 unsigned int offset = sizeof(struct rxrpc_wire_header); 432 unsigned int j;
428 unsigned int ix;
429 rxrpc_serial_t serial = sp->hdr.serial, ack_serial = 0; 433 rxrpc_serial_t serial = sp->hdr.serial, ack_serial = 0;
430 rxrpc_seq_t seq = sp->hdr.seq, hard_ack; 434 rxrpc_seq_t seq0 = sp->hdr.seq, hard_ack;
431 bool immediate_ack = false, jumbo_bad = false, queued; 435 bool immediate_ack = false, jumbo_bad = false;
432 u16 len; 436 u8 ack = 0;
433 u8 ack = 0, flags, annotation = 0;
434 437
435 _enter("{%u,%u},{%u,%u}", 438 _enter("{%u,%u},{%u,%u}",
436 call->rx_hard_ack, call->rx_top, skb->len, seq); 439 call->rx_hard_ack, call->rx_top, skb->len, seq0);
437 440
438 _proto("Rx DATA %%%u { #%u f=%02x }", 441 _proto("Rx DATA %%%u { #%u f=%02x n=%u }",
439 sp->hdr.serial, seq, sp->hdr.flags); 442 sp->hdr.serial, seq0, sp->hdr.flags, sp->nr_subpackets);
440 443
441 state = READ_ONCE(call->state); 444 state = READ_ONCE(call->state);
442 if (state >= RXRPC_CALL_COMPLETE) 445 if (state >= RXRPC_CALL_COMPLETE) {
446 rxrpc_free_skb(skb, rxrpc_skb_freed);
443 return; 447 return;
448 }
444 449
445 if (call->state == RXRPC_CALL_SERVER_RECV_REQUEST) { 450 if (call->state == RXRPC_CALL_SERVER_RECV_REQUEST) {
446 unsigned long timo = READ_ONCE(call->next_req_timo); 451 unsigned long timo = READ_ONCE(call->next_req_timo);
@@ -465,156 +470,157 @@ static void rxrpc_input_data(struct rxrpc_call *call, struct sk_buff *skb,
465 !rxrpc_receiving_reply(call)) 470 !rxrpc_receiving_reply(call))
466 goto unlock; 471 goto unlock;
467 472
468 call->ackr_prev_seq = seq; 473 call->ackr_prev_seq = seq0;
469
470 hard_ack = READ_ONCE(call->rx_hard_ack); 474 hard_ack = READ_ONCE(call->rx_hard_ack);
471 if (after(seq, hard_ack + call->rx_winsize)) {
472 ack = RXRPC_ACK_EXCEEDS_WINDOW;
473 ack_serial = serial;
474 goto ack;
475 }
476 475
477 flags = sp->hdr.flags; 476 if (sp->nr_subpackets > 1) {
478 if (flags & RXRPC_JUMBO_PACKET) {
479 if (call->nr_jumbo_bad > 3) { 477 if (call->nr_jumbo_bad > 3) {
480 ack = RXRPC_ACK_NOSPACE; 478 ack = RXRPC_ACK_NOSPACE;
481 ack_serial = serial; 479 ack_serial = serial;
482 goto ack; 480 goto ack;
483 } 481 }
484 annotation = 1;
485 } 482 }
486 483
487next_subpacket: 484 for (j = 0; j < sp->nr_subpackets; j++) {
488 queued = false; 485 rxrpc_serial_t serial = sp->hdr.serial + j;
489 ix = seq & RXRPC_RXTX_BUFF_MASK; 486 rxrpc_seq_t seq = seq0 + j;
490 len = skb->len; 487 unsigned int ix = seq & RXRPC_RXTX_BUFF_MASK;
491 if (flags & RXRPC_JUMBO_PACKET) 488 bool terminal = (j == sp->nr_subpackets - 1);
492 len = RXRPC_JUMBO_DATALEN; 489 bool last = terminal && (sp->rx_flags & RXRPC_SKB_INCL_LAST);
493 490 u8 flags, annotation = j;
494 if (flags & RXRPC_LAST_PACKET) { 491
495 if (test_bit(RXRPC_CALL_RX_LAST, &call->flags) && 492 _proto("Rx DATA+%u %%%u { #%x t=%u l=%u }",
496 seq != call->rx_top) { 493 j, serial, seq, terminal, last);
497 rxrpc_proto_abort("LSN", call, seq); 494
498 goto unlock; 495 if (last) {
499 } 496 if (test_bit(RXRPC_CALL_RX_LAST, &call->flags) &&
500 } else { 497 seq != call->rx_top) {
501 if (test_bit(RXRPC_CALL_RX_LAST, &call->flags) && 498 rxrpc_proto_abort("LSN", call, seq);
502 after_eq(seq, call->rx_top)) { 499 goto unlock;
503 rxrpc_proto_abort("LSA", call, seq); 500 }
504 goto unlock; 501 } else {
502 if (test_bit(RXRPC_CALL_RX_LAST, &call->flags) &&
503 after_eq(seq, call->rx_top)) {
504 rxrpc_proto_abort("LSA", call, seq);
505 goto unlock;
506 }
505 } 507 }
506 }
507
508 trace_rxrpc_rx_data(call->debug_id, seq, serial, flags, annotation);
509 if (before_eq(seq, hard_ack)) {
510 ack = RXRPC_ACK_DUPLICATE;
511 ack_serial = serial;
512 goto skip;
513 }
514 508
515 if (flags & RXRPC_REQUEST_ACK && !ack) { 509 flags = 0;
516 ack = RXRPC_ACK_REQUESTED; 510 if (last)
517 ack_serial = serial; 511 flags |= RXRPC_LAST_PACKET;
518 } 512 if (!terminal)
513 flags |= RXRPC_JUMBO_PACKET;
514 if (test_bit(j, sp->rx_req_ack))
515 flags |= RXRPC_REQUEST_ACK;
516 trace_rxrpc_rx_data(call->debug_id, seq, serial, flags, annotation);
519 517
520 if (call->rxtx_buffer[ix]) { 518 if (before_eq(seq, hard_ack)) {
521 rxrpc_input_dup_data(call, seq, annotation, &jumbo_bad);
522 if (ack != RXRPC_ACK_DUPLICATE) {
523 ack = RXRPC_ACK_DUPLICATE; 519 ack = RXRPC_ACK_DUPLICATE;
524 ack_serial = serial; 520 ack_serial = serial;
521 continue;
525 } 522 }
526 immediate_ack = true;
527 goto skip;
528 }
529
530 /* Queue the packet. We use a couple of memory barriers here as need
531 * to make sure that rx_top is perceived to be set after the buffer
532 * pointer and that the buffer pointer is set after the annotation and
533 * the skb data.
534 *
535 * Barriers against rxrpc_recvmsg_data() and rxrpc_rotate_rx_window()
536 * and also rxrpc_fill_out_ack().
537 */
538 rxrpc_get_skb(skb, rxrpc_skb_rx_got);
539 call->rxtx_annotations[ix] = annotation;
540 smp_wmb();
541 call->rxtx_buffer[ix] = skb;
542 if (after(seq, call->rx_top)) {
543 smp_store_release(&call->rx_top, seq);
544 } else if (before(seq, call->rx_top)) {
545 /* Send an immediate ACK if we fill in a hole */
546 if (!ack) {
547 ack = RXRPC_ACK_DELAY;
548 ack_serial = serial;
549 }
550 immediate_ack = true;
551 }
552 if (flags & RXRPC_LAST_PACKET) {
553 set_bit(RXRPC_CALL_RX_LAST, &call->flags);
554 trace_rxrpc_receive(call, rxrpc_receive_queue_last, serial, seq);
555 } else {
556 trace_rxrpc_receive(call, rxrpc_receive_queue, serial, seq);
557 }
558 queued = true;
559 523
560 if (after_eq(seq, call->rx_expect_next)) { 524 if (call->rxtx_buffer[ix]) {
561 if (after(seq, call->rx_expect_next)) { 525 rxrpc_input_dup_data(call, seq, sp->nr_subpackets > 1,
562 _net("OOS %u > %u", seq, call->rx_expect_next); 526 &jumbo_bad);
563 ack = RXRPC_ACK_OUT_OF_SEQUENCE; 527 if (ack != RXRPC_ACK_DUPLICATE) {
564 ack_serial = serial; 528 ack = RXRPC_ACK_DUPLICATE;
529 ack_serial = serial;
530 }
531 immediate_ack = true;
532 continue;
565 } 533 }
566 call->rx_expect_next = seq + 1;
567 }
568 534
569skip:
570 offset += len;
571 if (flags & RXRPC_JUMBO_PACKET) {
572 if (skb_copy_bits(skb, offset, &flags, 1) < 0) {
573 rxrpc_proto_abort("XJF", call, seq);
574 goto unlock;
575 }
576 offset += sizeof(struct rxrpc_jumbo_header);
577 seq++;
578 serial++;
579 annotation++;
580 if (flags & RXRPC_JUMBO_PACKET)
581 annotation |= RXRPC_RX_ANNO_JLAST;
582 if (after(seq, hard_ack + call->rx_winsize)) { 535 if (after(seq, hard_ack + call->rx_winsize)) {
583 ack = RXRPC_ACK_EXCEEDS_WINDOW; 536 ack = RXRPC_ACK_EXCEEDS_WINDOW;
584 ack_serial = serial; 537 ack_serial = serial;
585 if (!jumbo_bad) { 538 if (flags & RXRPC_JUMBO_PACKET) {
586 call->nr_jumbo_bad++; 539 if (!jumbo_bad) {
587 jumbo_bad = true; 540 call->nr_jumbo_bad++;
541 jumbo_bad = true;
542 }
588 } 543 }
544
589 goto ack; 545 goto ack;
590 } 546 }
591 547
592 _proto("Rx DATA Jumbo %%%u", serial); 548 if (flags & RXRPC_REQUEST_ACK && !ack) {
593 goto next_subpacket; 549 ack = RXRPC_ACK_REQUESTED;
594 } 550 ack_serial = serial;
551 }
552
553 /* Queue the packet. We use a couple of memory barriers here as need
554 * to make sure that rx_top is perceived to be set after the buffer
555 * pointer and that the buffer pointer is set after the annotation and
556 * the skb data.
557 *
558 * Barriers against rxrpc_recvmsg_data() and rxrpc_rotate_rx_window()
559 * and also rxrpc_fill_out_ack().
560 */
561 if (!terminal)
562 rxrpc_get_skb(skb, rxrpc_skb_got);
563 call->rxtx_annotations[ix] = annotation;
564 smp_wmb();
565 call->rxtx_buffer[ix] = skb;
566 if (after(seq, call->rx_top)) {
567 smp_store_release(&call->rx_top, seq);
568 } else if (before(seq, call->rx_top)) {
569 /* Send an immediate ACK if we fill in a hole */
570 if (!ack) {
571 ack = RXRPC_ACK_DELAY;
572 ack_serial = serial;
573 }
574 immediate_ack = true;
575 }
595 576
596 if (queued && flags & RXRPC_LAST_PACKET && !ack) { 577 if (terminal) {
597 ack = RXRPC_ACK_DELAY; 578 /* From this point on, we're not allowed to touch the
598 ack_serial = serial; 579 * packet any longer as its ref now belongs to the Rx
580 * ring.
581 */
582 skb = NULL;
583 }
584
585 if (last) {
586 set_bit(RXRPC_CALL_RX_LAST, &call->flags);
587 if (!ack) {
588 ack = RXRPC_ACK_DELAY;
589 ack_serial = serial;
590 }
591 trace_rxrpc_receive(call, rxrpc_receive_queue_last, serial, seq);
592 } else {
593 trace_rxrpc_receive(call, rxrpc_receive_queue, serial, seq);
594 }
595
596 if (after_eq(seq, call->rx_expect_next)) {
597 if (after(seq, call->rx_expect_next)) {
598 _net("OOS %u > %u", seq, call->rx_expect_next);
599 ack = RXRPC_ACK_OUT_OF_SEQUENCE;
600 ack_serial = serial;
601 }
602 call->rx_expect_next = seq + 1;
603 }
599 } 604 }
600 605
601ack: 606ack:
602 if (ack) 607 if (ack)
603 rxrpc_propose_ACK(call, ack, skew, ack_serial, 608 rxrpc_propose_ACK(call, ack, ack_serial,
604 immediate_ack, true, 609 immediate_ack, true,
605 rxrpc_propose_ack_input_data); 610 rxrpc_propose_ack_input_data);
606 else 611 else
607 rxrpc_propose_ACK(call, RXRPC_ACK_DELAY, skew, serial, 612 rxrpc_propose_ACK(call, RXRPC_ACK_DELAY, serial,
608 false, true, 613 false, true,
609 rxrpc_propose_ack_input_data); 614 rxrpc_propose_ack_input_data);
610 615
611 if (sp->hdr.seq == READ_ONCE(call->rx_hard_ack) + 1) { 616 if (seq0 == READ_ONCE(call->rx_hard_ack) + 1) {
612 trace_rxrpc_notify_socket(call->debug_id, serial); 617 trace_rxrpc_notify_socket(call->debug_id, serial);
613 rxrpc_notify_socket(call); 618 rxrpc_notify_socket(call);
614 } 619 }
615 620
616unlock: 621unlock:
617 spin_unlock(&call->input_lock); 622 spin_unlock(&call->input_lock);
623 rxrpc_free_skb(skb, rxrpc_skb_freed);
618 _leave(" [queued]"); 624 _leave(" [queued]");
619} 625}
620 626
@@ -822,8 +828,7 @@ static void rxrpc_input_soft_acks(struct rxrpc_call *call, u8 *acks,
822 * soft-ACK means that the packet may be discarded and retransmission 828 * soft-ACK means that the packet may be discarded and retransmission
823 * requested. A phase is complete when all packets are hard-ACK'd. 829 * requested. A phase is complete when all packets are hard-ACK'd.
824 */ 830 */
825static void rxrpc_input_ack(struct rxrpc_call *call, struct sk_buff *skb, 831static void rxrpc_input_ack(struct rxrpc_call *call, struct sk_buff *skb)
826 u16 skew)
827{ 832{
828 struct rxrpc_ack_summary summary = { 0 }; 833 struct rxrpc_ack_summary summary = { 0 };
829 struct rxrpc_skb_priv *sp = rxrpc_skb(skb); 834 struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
@@ -867,11 +872,11 @@ static void rxrpc_input_ack(struct rxrpc_call *call, struct sk_buff *skb,
867 if (buf.ack.reason == RXRPC_ACK_PING) { 872 if (buf.ack.reason == RXRPC_ACK_PING) {
868 _proto("Rx ACK %%%u PING Request", sp->hdr.serial); 873 _proto("Rx ACK %%%u PING Request", sp->hdr.serial);
869 rxrpc_propose_ACK(call, RXRPC_ACK_PING_RESPONSE, 874 rxrpc_propose_ACK(call, RXRPC_ACK_PING_RESPONSE,
870 skew, sp->hdr.serial, true, true, 875 sp->hdr.serial, true, true,
871 rxrpc_propose_ack_respond_to_ping); 876 rxrpc_propose_ack_respond_to_ping);
872 } else if (sp->hdr.flags & RXRPC_REQUEST_ACK) { 877 } else if (sp->hdr.flags & RXRPC_REQUEST_ACK) {
873 rxrpc_propose_ACK(call, RXRPC_ACK_REQUESTED, 878 rxrpc_propose_ACK(call, RXRPC_ACK_REQUESTED,
874 skew, sp->hdr.serial, true, true, 879 sp->hdr.serial, true, true,
875 rxrpc_propose_ack_respond_to_ack); 880 rxrpc_propose_ack_respond_to_ack);
876 } 881 }
877 882
@@ -948,7 +953,7 @@ static void rxrpc_input_ack(struct rxrpc_call *call, struct sk_buff *skb,
948 RXRPC_TX_ANNO_LAST && 953 RXRPC_TX_ANNO_LAST &&
949 summary.nr_acks == call->tx_top - hard_ack && 954 summary.nr_acks == call->tx_top - hard_ack &&
950 rxrpc_is_client_call(call)) 955 rxrpc_is_client_call(call))
951 rxrpc_propose_ACK(call, RXRPC_ACK_PING, skew, sp->hdr.serial, 956 rxrpc_propose_ACK(call, RXRPC_ACK_PING, sp->hdr.serial,
952 false, true, 957 false, true,
953 rxrpc_propose_ack_ping_for_lost_reply); 958 rxrpc_propose_ack_ping_for_lost_reply);
954 959
@@ -1004,7 +1009,7 @@ static void rxrpc_input_abort(struct rxrpc_call *call, struct sk_buff *skb)
1004 * Process an incoming call packet. 1009 * Process an incoming call packet.
1005 */ 1010 */
1006static void rxrpc_input_call_packet(struct rxrpc_call *call, 1011static void rxrpc_input_call_packet(struct rxrpc_call *call,
1007 struct sk_buff *skb, u16 skew) 1012 struct sk_buff *skb)
1008{ 1013{
1009 struct rxrpc_skb_priv *sp = rxrpc_skb(skb); 1014 struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
1010 unsigned long timo; 1015 unsigned long timo;
@@ -1023,11 +1028,11 @@ static void rxrpc_input_call_packet(struct rxrpc_call *call,
1023 1028
1024 switch (sp->hdr.type) { 1029 switch (sp->hdr.type) {
1025 case RXRPC_PACKET_TYPE_DATA: 1030 case RXRPC_PACKET_TYPE_DATA:
1026 rxrpc_input_data(call, skb, skew); 1031 rxrpc_input_data(call, skb);
1027 break; 1032 goto no_free;
1028 1033
1029 case RXRPC_PACKET_TYPE_ACK: 1034 case RXRPC_PACKET_TYPE_ACK:
1030 rxrpc_input_ack(call, skb, skew); 1035 rxrpc_input_ack(call, skb);
1031 break; 1036 break;
1032 1037
1033 case RXRPC_PACKET_TYPE_BUSY: 1038 case RXRPC_PACKET_TYPE_BUSY:
@@ -1051,6 +1056,8 @@ static void rxrpc_input_call_packet(struct rxrpc_call *call,
1051 break; 1056 break;
1052 } 1057 }
1053 1058
1059 rxrpc_free_skb(skb, rxrpc_skb_freed);
1060no_free:
1054 _leave(""); 1061 _leave("");
1055} 1062}
1056 1063
@@ -1108,8 +1115,12 @@ static void rxrpc_post_packet_to_local(struct rxrpc_local *local,
1108{ 1115{
1109 _enter("%p,%p", local, skb); 1116 _enter("%p,%p", local, skb);
1110 1117
1111 skb_queue_tail(&local->event_queue, skb); 1118 if (rxrpc_get_local_maybe(local)) {
1112 rxrpc_queue_local(local); 1119 skb_queue_tail(&local->event_queue, skb);
1120 rxrpc_queue_local(local);
1121 } else {
1122 rxrpc_free_skb(skb, rxrpc_skb_freed);
1123 }
1113} 1124}
1114 1125
1115/* 1126/*
@@ -1119,8 +1130,12 @@ static void rxrpc_reject_packet(struct rxrpc_local *local, struct sk_buff *skb)
1119{ 1130{
1120 CHECK_SLAB_OKAY(&local->usage); 1131 CHECK_SLAB_OKAY(&local->usage);
1121 1132
1122 skb_queue_tail(&local->reject_queue, skb); 1133 if (rxrpc_get_local_maybe(local)) {
1123 rxrpc_queue_local(local); 1134 skb_queue_tail(&local->reject_queue, skb);
1135 rxrpc_queue_local(local);
1136 } else {
1137 rxrpc_free_skb(skb, rxrpc_skb_freed);
1138 }
1124} 1139}
1125 1140
1126/* 1141/*
@@ -1173,7 +1188,6 @@ int rxrpc_input_packet(struct sock *udp_sk, struct sk_buff *skb)
1173 struct rxrpc_peer *peer = NULL; 1188 struct rxrpc_peer *peer = NULL;
1174 struct rxrpc_sock *rx = NULL; 1189 struct rxrpc_sock *rx = NULL;
1175 unsigned int channel; 1190 unsigned int channel;
1176 int skew = 0;
1177 1191
1178 _enter("%p", udp_sk); 1192 _enter("%p", udp_sk);
1179 1193
@@ -1184,7 +1198,7 @@ int rxrpc_input_packet(struct sock *udp_sk, struct sk_buff *skb)
1184 if (skb->tstamp == 0) 1198 if (skb->tstamp == 0)
1185 skb->tstamp = ktime_get_real(); 1199 skb->tstamp = ktime_get_real();
1186 1200
1187 rxrpc_new_skb(skb, rxrpc_skb_rx_received); 1201 rxrpc_new_skb(skb, rxrpc_skb_received);
1188 1202
1189 skb_pull(skb, sizeof(struct udphdr)); 1203 skb_pull(skb, sizeof(struct udphdr));
1190 1204
@@ -1201,7 +1215,7 @@ int rxrpc_input_packet(struct sock *udp_sk, struct sk_buff *skb)
1201 static int lose; 1215 static int lose;
1202 if ((lose++ & 7) == 7) { 1216 if ((lose++ & 7) == 7) {
1203 trace_rxrpc_rx_lose(sp); 1217 trace_rxrpc_rx_lose(sp);
1204 rxrpc_free_skb(skb, rxrpc_skb_rx_lost); 1218 rxrpc_free_skb(skb, rxrpc_skb_lost);
1205 return 0; 1219 return 0;
1206 } 1220 }
1207 } 1221 }
@@ -1233,9 +1247,26 @@ int rxrpc_input_packet(struct sock *udp_sk, struct sk_buff *skb)
1233 if (sp->hdr.callNumber == 0 || 1247 if (sp->hdr.callNumber == 0 ||
1234 sp->hdr.seq == 0) 1248 sp->hdr.seq == 0)
1235 goto bad_message; 1249 goto bad_message;
1236 if (sp->hdr.flags & RXRPC_JUMBO_PACKET && 1250 if (!rxrpc_validate_data(skb))
1237 !rxrpc_validate_jumbo(skb))
1238 goto bad_message; 1251 goto bad_message;
1252
1253 /* Unshare the packet so that it can be modified for in-place
1254 * decryption.
1255 */
1256 if (sp->hdr.securityIndex != 0) {
1257 struct sk_buff *nskb = skb_unshare(skb, GFP_ATOMIC);
1258 if (!nskb) {
1259 rxrpc_eaten_skb(skb, rxrpc_skb_unshared_nomem);
1260 goto out;
1261 }
1262
1263 if (nskb != skb) {
1264 rxrpc_eaten_skb(skb, rxrpc_skb_received);
1265 rxrpc_new_skb(skb, rxrpc_skb_unshared);
1266 skb = nskb;
1267 sp = rxrpc_skb(skb);
1268 }
1269 }
1239 break; 1270 break;
1240 1271
1241 case RXRPC_PACKET_TYPE_CHALLENGE: 1272 case RXRPC_PACKET_TYPE_CHALLENGE:
@@ -1301,15 +1332,8 @@ int rxrpc_input_packet(struct sock *udp_sk, struct sk_buff *skb)
1301 goto out; 1332 goto out;
1302 } 1333 }
1303 1334
1304 /* Note the serial number skew here */ 1335 if ((int)sp->hdr.serial - (int)conn->hi_serial > 0)
1305 skew = (int)sp->hdr.serial - (int)conn->hi_serial; 1336 conn->hi_serial = sp->hdr.serial;
1306 if (skew >= 0) {
1307 if (skew > 0)
1308 conn->hi_serial = sp->hdr.serial;
1309 } else {
1310 skew = -skew;
1311 skew = min(skew, 65535);
1312 }
1313 1337
1314 /* Call-bound packets are routed by connection channel. */ 1338 /* Call-bound packets are routed by connection channel. */
1315 channel = sp->hdr.cid & RXRPC_CHANNELMASK; 1339 channel = sp->hdr.cid & RXRPC_CHANNELMASK;
@@ -1372,15 +1396,18 @@ int rxrpc_input_packet(struct sock *udp_sk, struct sk_buff *skb)
1372 call = rxrpc_new_incoming_call(local, rx, skb); 1396 call = rxrpc_new_incoming_call(local, rx, skb);
1373 if (!call) 1397 if (!call)
1374 goto reject_packet; 1398 goto reject_packet;
1375 rxrpc_send_ping(call, skb, skew); 1399 rxrpc_send_ping(call, skb);
1376 mutex_unlock(&call->user_mutex); 1400 mutex_unlock(&call->user_mutex);
1377 } 1401 }
1378 1402
1379 rxrpc_input_call_packet(call, skb, skew); 1403 /* Process a call packet; this either discards or passes on the ref
1380 goto discard; 1404 * elsewhere.
1405 */
1406 rxrpc_input_call_packet(call, skb);
1407 goto out;
1381 1408
1382discard: 1409discard:
1383 rxrpc_free_skb(skb, rxrpc_skb_rx_freed); 1410 rxrpc_free_skb(skb, rxrpc_skb_freed);
1384out: 1411out:
1385 trace_rxrpc_rx_done(0, 0); 1412 trace_rxrpc_rx_done(0, 0);
1386 return 0; 1413 return 0;
diff --git a/net/rxrpc/local_event.c b/net/rxrpc/local_event.c
index e93a78f7c05e..3ce6d628cd75 100644
--- a/net/rxrpc/local_event.c
+++ b/net/rxrpc/local_event.c
@@ -90,7 +90,7 @@ void rxrpc_process_local_events(struct rxrpc_local *local)
90 if (skb) { 90 if (skb) {
91 struct rxrpc_skb_priv *sp = rxrpc_skb(skb); 91 struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
92 92
93 rxrpc_see_skb(skb, rxrpc_skb_rx_seen); 93 rxrpc_see_skb(skb, rxrpc_skb_seen);
94 _debug("{%d},{%u}", local->debug_id, sp->hdr.type); 94 _debug("{%d},{%u}", local->debug_id, sp->hdr.type);
95 95
96 switch (sp->hdr.type) { 96 switch (sp->hdr.type) {
@@ -108,7 +108,7 @@ void rxrpc_process_local_events(struct rxrpc_local *local)
108 break; 108 break;
109 } 109 }
110 110
111 rxrpc_free_skb(skb, rxrpc_skb_rx_freed); 111 rxrpc_free_skb(skb, rxrpc_skb_freed);
112 } 112 }
113 113
114 _leave(""); 114 _leave("");
diff --git a/net/rxrpc/local_object.c b/net/rxrpc/local_object.c
index b1c71bad510b..36587260cabd 100644
--- a/net/rxrpc/local_object.c
+++ b/net/rxrpc/local_object.c
@@ -79,6 +79,7 @@ static struct rxrpc_local *rxrpc_alloc_local(struct rxrpc_net *rxnet,
79 local = kzalloc(sizeof(struct rxrpc_local), GFP_KERNEL); 79 local = kzalloc(sizeof(struct rxrpc_local), GFP_KERNEL);
80 if (local) { 80 if (local) {
81 atomic_set(&local->usage, 1); 81 atomic_set(&local->usage, 1);
82 atomic_set(&local->active_users, 1);
82 local->rxnet = rxnet; 83 local->rxnet = rxnet;
83 INIT_LIST_HEAD(&local->link); 84 INIT_LIST_HEAD(&local->link);
84 INIT_WORK(&local->processor, rxrpc_local_processor); 85 INIT_WORK(&local->processor, rxrpc_local_processor);
@@ -92,7 +93,7 @@ static struct rxrpc_local *rxrpc_alloc_local(struct rxrpc_net *rxnet,
92 local->debug_id = atomic_inc_return(&rxrpc_debug_id); 93 local->debug_id = atomic_inc_return(&rxrpc_debug_id);
93 memcpy(&local->srx, srx, sizeof(*srx)); 94 memcpy(&local->srx, srx, sizeof(*srx));
94 local->srx.srx_service = 0; 95 local->srx.srx_service = 0;
95 trace_rxrpc_local(local, rxrpc_local_new, 1, NULL); 96 trace_rxrpc_local(local->debug_id, rxrpc_local_new, 1, NULL);
96 } 97 }
97 98
98 _leave(" = %p", local); 99 _leave(" = %p", local);
@@ -266,11 +267,8 @@ struct rxrpc_local *rxrpc_lookup_local(struct net *net,
266 * bind the transport socket may still fail if we're attempting 267 * bind the transport socket may still fail if we're attempting
267 * to use a local address that the dying object is still using. 268 * to use a local address that the dying object is still using.
268 */ 269 */
269 if (!rxrpc_get_local_maybe(local)) { 270 if (!rxrpc_use_local(local))
270 cursor = cursor->next;
271 list_del_init(&local->link);
272 break; 271 break;
273 }
274 272
275 age = "old"; 273 age = "old";
276 goto found; 274 goto found;
@@ -284,7 +282,10 @@ struct rxrpc_local *rxrpc_lookup_local(struct net *net,
284 if (ret < 0) 282 if (ret < 0)
285 goto sock_error; 283 goto sock_error;
286 284
287 list_add_tail(&local->link, cursor); 285 if (cursor != &rxnet->local_endpoints)
286 list_replace_init(cursor, &local->link);
287 else
288 list_add_tail(&local->link, cursor);
288 age = "new"; 289 age = "new";
289 290
290found: 291found:
@@ -320,7 +321,7 @@ struct rxrpc_local *rxrpc_get_local(struct rxrpc_local *local)
320 int n; 321 int n;
321 322
322 n = atomic_inc_return(&local->usage); 323 n = atomic_inc_return(&local->usage);
323 trace_rxrpc_local(local, rxrpc_local_got, n, here); 324 trace_rxrpc_local(local->debug_id, rxrpc_local_got, n, here);
324 return local; 325 return local;
325} 326}
326 327
@@ -334,7 +335,8 @@ struct rxrpc_local *rxrpc_get_local_maybe(struct rxrpc_local *local)
334 if (local) { 335 if (local) {
335 int n = atomic_fetch_add_unless(&local->usage, 1, 0); 336 int n = atomic_fetch_add_unless(&local->usage, 1, 0);
336 if (n > 0) 337 if (n > 0)
337 trace_rxrpc_local(local, rxrpc_local_got, n + 1, here); 338 trace_rxrpc_local(local->debug_id, rxrpc_local_got,
339 n + 1, here);
338 else 340 else
339 local = NULL; 341 local = NULL;
340 } 342 }
@@ -342,24 +344,18 @@ struct rxrpc_local *rxrpc_get_local_maybe(struct rxrpc_local *local)
342} 344}
343 345
344/* 346/*
345 * Queue a local endpoint. 347 * Queue a local endpoint and pass the caller's reference to the work item.
346 */ 348 */
347void rxrpc_queue_local(struct rxrpc_local *local) 349void rxrpc_queue_local(struct rxrpc_local *local)
348{ 350{
349 const void *here = __builtin_return_address(0); 351 const void *here = __builtin_return_address(0);
352 unsigned int debug_id = local->debug_id;
353 int n = atomic_read(&local->usage);
350 354
351 if (rxrpc_queue_work(&local->processor)) 355 if (rxrpc_queue_work(&local->processor))
352 trace_rxrpc_local(local, rxrpc_local_queued, 356 trace_rxrpc_local(debug_id, rxrpc_local_queued, n, here);
353 atomic_read(&local->usage), here); 357 else
354} 358 rxrpc_put_local(local);
355
356/*
357 * A local endpoint reached its end of life.
358 */
359static void __rxrpc_put_local(struct rxrpc_local *local)
360{
361 _enter("%d", local->debug_id);
362 rxrpc_queue_work(&local->processor);
363} 359}
364 360
365/* 361/*
@@ -372,10 +368,47 @@ void rxrpc_put_local(struct rxrpc_local *local)
372 368
373 if (local) { 369 if (local) {
374 n = atomic_dec_return(&local->usage); 370 n = atomic_dec_return(&local->usage);
375 trace_rxrpc_local(local, rxrpc_local_put, n, here); 371 trace_rxrpc_local(local->debug_id, rxrpc_local_put, n, here);
376 372
377 if (n == 0) 373 if (n == 0)
378 __rxrpc_put_local(local); 374 call_rcu(&local->rcu, rxrpc_local_rcu);
375 }
376}
377
378/*
379 * Start using a local endpoint.
380 */
381struct rxrpc_local *rxrpc_use_local(struct rxrpc_local *local)
382{
383 unsigned int au;
384
385 local = rxrpc_get_local_maybe(local);
386 if (!local)
387 return NULL;
388
389 au = atomic_fetch_add_unless(&local->active_users, 1, 0);
390 if (au == 0) {
391 rxrpc_put_local(local);
392 return NULL;
393 }
394
395 return local;
396}
397
398/*
399 * Cease using a local endpoint. Once the number of active users reaches 0, we
400 * start the closure of the transport in the work processor.
401 */
402void rxrpc_unuse_local(struct rxrpc_local *local)
403{
404 unsigned int au;
405
406 if (local) {
407 au = atomic_dec_return(&local->active_users);
408 if (au == 0)
409 rxrpc_queue_local(local);
410 else
411 rxrpc_put_local(local);
379 } 412 }
380} 413}
381 414
@@ -393,21 +426,14 @@ static void rxrpc_local_destroyer(struct rxrpc_local *local)
393 426
394 _enter("%d", local->debug_id); 427 _enter("%d", local->debug_id);
395 428
396 /* We can get a race between an incoming call packet queueing the
397 * processor again and the work processor starting the destruction
398 * process which will shut down the UDP socket.
399 */
400 if (local->dead) {
401 _leave(" [already dead]");
402 return;
403 }
404 local->dead = true; 429 local->dead = true;
405 430
406 mutex_lock(&rxnet->local_mutex); 431 mutex_lock(&rxnet->local_mutex);
407 list_del_init(&local->link); 432 list_del_init(&local->link);
408 mutex_unlock(&rxnet->local_mutex); 433 mutex_unlock(&rxnet->local_mutex);
409 434
410 ASSERT(RB_EMPTY_ROOT(&local->client_conns)); 435 rxrpc_clean_up_local_conns(local);
436 rxrpc_service_connection_reaper(&rxnet->service_conn_reaper);
411 ASSERT(!local->service); 437 ASSERT(!local->service);
412 438
413 if (socket) { 439 if (socket) {
@@ -422,13 +448,11 @@ static void rxrpc_local_destroyer(struct rxrpc_local *local)
422 */ 448 */
423 rxrpc_purge_queue(&local->reject_queue); 449 rxrpc_purge_queue(&local->reject_queue);
424 rxrpc_purge_queue(&local->event_queue); 450 rxrpc_purge_queue(&local->event_queue);
425
426 _debug("rcu local %d", local->debug_id);
427 call_rcu(&local->rcu, rxrpc_local_rcu);
428} 451}
429 452
430/* 453/*
431 * Process events on an endpoint 454 * Process events on an endpoint. The work item carries a ref which
455 * we must release.
432 */ 456 */
433static void rxrpc_local_processor(struct work_struct *work) 457static void rxrpc_local_processor(struct work_struct *work)
434{ 458{
@@ -436,13 +460,15 @@ static void rxrpc_local_processor(struct work_struct *work)
436 container_of(work, struct rxrpc_local, processor); 460 container_of(work, struct rxrpc_local, processor);
437 bool again; 461 bool again;
438 462
439 trace_rxrpc_local(local, rxrpc_local_processing, 463 trace_rxrpc_local(local->debug_id, rxrpc_local_processing,
440 atomic_read(&local->usage), NULL); 464 atomic_read(&local->usage), NULL);
441 465
442 do { 466 do {
443 again = false; 467 again = false;
444 if (atomic_read(&local->usage) == 0) 468 if (atomic_read(&local->active_users) == 0) {
445 return rxrpc_local_destroyer(local); 469 rxrpc_local_destroyer(local);
470 break;
471 }
446 472
447 if (!skb_queue_empty(&local->reject_queue)) { 473 if (!skb_queue_empty(&local->reject_queue)) {
448 rxrpc_reject_packets(local); 474 rxrpc_reject_packets(local);
@@ -454,6 +480,8 @@ static void rxrpc_local_processor(struct work_struct *work)
454 again = true; 480 again = true;
455 } 481 }
456 } while (again); 482 } while (again);
483
484 rxrpc_put_local(local);
457} 485}
458 486
459/* 487/*
diff --git a/net/rxrpc/output.c b/net/rxrpc/output.c
index 948e3fe249ec..935bb60fff56 100644
--- a/net/rxrpc/output.c
+++ b/net/rxrpc/output.c
@@ -87,7 +87,7 @@ static size_t rxrpc_fill_out_ack(struct rxrpc_connection *conn,
87 *_top = top; 87 *_top = top;
88 88
89 pkt->ack.bufferSpace = htons(8); 89 pkt->ack.bufferSpace = htons(8);
90 pkt->ack.maxSkew = htons(call->ackr_skew); 90 pkt->ack.maxSkew = htons(0);
91 pkt->ack.firstPacket = htonl(hard_ack + 1); 91 pkt->ack.firstPacket = htonl(hard_ack + 1);
92 pkt->ack.previousPacket = htonl(call->ackr_prev_seq); 92 pkt->ack.previousPacket = htonl(call->ackr_prev_seq);
93 pkt->ack.serial = htonl(serial); 93 pkt->ack.serial = htonl(serial);
@@ -228,7 +228,6 @@ int rxrpc_send_ack_packet(struct rxrpc_call *call, bool ping,
228 if (ping) 228 if (ping)
229 clear_bit(RXRPC_CALL_PINGING, &call->flags); 229 clear_bit(RXRPC_CALL_PINGING, &call->flags);
230 rxrpc_propose_ACK(call, pkt->ack.reason, 230 rxrpc_propose_ACK(call, pkt->ack.reason,
231 ntohs(pkt->ack.maxSkew),
232 ntohl(pkt->ack.serial), 231 ntohl(pkt->ack.serial),
233 false, true, 232 false, true,
234 rxrpc_propose_ack_retry_tx); 233 rxrpc_propose_ack_retry_tx);
@@ -566,7 +565,7 @@ void rxrpc_reject_packets(struct rxrpc_local *local)
566 memset(&whdr, 0, sizeof(whdr)); 565 memset(&whdr, 0, sizeof(whdr));
567 566
568 while ((skb = skb_dequeue(&local->reject_queue))) { 567 while ((skb = skb_dequeue(&local->reject_queue))) {
569 rxrpc_see_skb(skb, rxrpc_skb_rx_seen); 568 rxrpc_see_skb(skb, rxrpc_skb_seen);
570 sp = rxrpc_skb(skb); 569 sp = rxrpc_skb(skb);
571 570
572 switch (skb->mark) { 571 switch (skb->mark) {
@@ -582,7 +581,7 @@ void rxrpc_reject_packets(struct rxrpc_local *local)
582 ioc = 2; 581 ioc = 2;
583 break; 582 break;
584 default: 583 default:
585 rxrpc_free_skb(skb, rxrpc_skb_rx_freed); 584 rxrpc_free_skb(skb, rxrpc_skb_freed);
586 continue; 585 continue;
587 } 586 }
588 587
@@ -607,7 +606,7 @@ void rxrpc_reject_packets(struct rxrpc_local *local)
607 rxrpc_tx_point_reject); 606 rxrpc_tx_point_reject);
608 } 607 }
609 608
610 rxrpc_free_skb(skb, rxrpc_skb_rx_freed); 609 rxrpc_free_skb(skb, rxrpc_skb_freed);
611 } 610 }
612 611
613 _leave(""); 612 _leave("");
diff --git a/net/rxrpc/peer_event.c b/net/rxrpc/peer_event.c
index 9f2f45c09e58..c97ebdc043e4 100644
--- a/net/rxrpc/peer_event.c
+++ b/net/rxrpc/peer_event.c
@@ -163,11 +163,11 @@ void rxrpc_error_report(struct sock *sk)
163 _leave("UDP socket errqueue empty"); 163 _leave("UDP socket errqueue empty");
164 return; 164 return;
165 } 165 }
166 rxrpc_new_skb(skb, rxrpc_skb_rx_received); 166 rxrpc_new_skb(skb, rxrpc_skb_received);
167 serr = SKB_EXT_ERR(skb); 167 serr = SKB_EXT_ERR(skb);
168 if (!skb->len && serr->ee.ee_origin == SO_EE_ORIGIN_TIMESTAMPING) { 168 if (!skb->len && serr->ee.ee_origin == SO_EE_ORIGIN_TIMESTAMPING) {
169 _leave("UDP empty message"); 169 _leave("UDP empty message");
170 rxrpc_free_skb(skb, rxrpc_skb_rx_freed); 170 rxrpc_free_skb(skb, rxrpc_skb_freed);
171 return; 171 return;
172 } 172 }
173 173
@@ -177,7 +177,7 @@ void rxrpc_error_report(struct sock *sk)
177 peer = NULL; 177 peer = NULL;
178 if (!peer) { 178 if (!peer) {
179 rcu_read_unlock(); 179 rcu_read_unlock();
180 rxrpc_free_skb(skb, rxrpc_skb_rx_freed); 180 rxrpc_free_skb(skb, rxrpc_skb_freed);
181 _leave(" [no peer]"); 181 _leave(" [no peer]");
182 return; 182 return;
183 } 183 }
@@ -189,7 +189,7 @@ void rxrpc_error_report(struct sock *sk)
189 serr->ee.ee_code == ICMP_FRAG_NEEDED)) { 189 serr->ee.ee_code == ICMP_FRAG_NEEDED)) {
190 rxrpc_adjust_mtu(peer, serr); 190 rxrpc_adjust_mtu(peer, serr);
191 rcu_read_unlock(); 191 rcu_read_unlock();
192 rxrpc_free_skb(skb, rxrpc_skb_rx_freed); 192 rxrpc_free_skb(skb, rxrpc_skb_freed);
193 rxrpc_put_peer(peer); 193 rxrpc_put_peer(peer);
194 _leave(" [MTU update]"); 194 _leave(" [MTU update]");
195 return; 195 return;
@@ -197,7 +197,7 @@ void rxrpc_error_report(struct sock *sk)
197 197
198 rxrpc_store_error(peer, serr); 198 rxrpc_store_error(peer, serr);
199 rcu_read_unlock(); 199 rcu_read_unlock();
200 rxrpc_free_skb(skb, rxrpc_skb_rx_freed); 200 rxrpc_free_skb(skb, rxrpc_skb_freed);
201 rxrpc_put_peer(peer); 201 rxrpc_put_peer(peer);
202 202
203 _leave(""); 203 _leave("");
@@ -378,7 +378,7 @@ static void rxrpc_peer_keepalive_dispatch(struct rxrpc_net *rxnet,
378 spin_lock_bh(&rxnet->peer_hash_lock); 378 spin_lock_bh(&rxnet->peer_hash_lock);
379 list_add_tail(&peer->keepalive_link, 379 list_add_tail(&peer->keepalive_link,
380 &rxnet->peer_keepalive[slot & mask]); 380 &rxnet->peer_keepalive[slot & mask]);
381 rxrpc_put_peer(peer); 381 rxrpc_put_peer_locked(peer);
382 } 382 }
383 383
384 spin_unlock_bh(&rxnet->peer_hash_lock); 384 spin_unlock_bh(&rxnet->peer_hash_lock);
diff --git a/net/rxrpc/peer_object.c b/net/rxrpc/peer_object.c
index 9d3ce81cf8ae..9c3ac96f71cb 100644
--- a/net/rxrpc/peer_object.c
+++ b/net/rxrpc/peer_object.c
@@ -437,6 +437,24 @@ void rxrpc_put_peer(struct rxrpc_peer *peer)
437} 437}
438 438
439/* 439/*
440 * Drop a ref on a peer record where the caller already holds the
441 * peer_hash_lock.
442 */
443void rxrpc_put_peer_locked(struct rxrpc_peer *peer)
444{
445 const void *here = __builtin_return_address(0);
446 int n;
447
448 n = atomic_dec_return(&peer->usage);
449 trace_rxrpc_peer(peer, rxrpc_peer_put, n, here);
450 if (n == 0) {
451 hash_del_rcu(&peer->hash_link);
452 list_del_init(&peer->keepalive_link);
453 kfree_rcu(peer, rcu);
454 }
455}
456
457/*
440 * Make sure all peer records have been discarded. 458 * Make sure all peer records have been discarded.
441 */ 459 */
442void rxrpc_destroy_all_peers(struct rxrpc_net *rxnet) 460void rxrpc_destroy_all_peers(struct rxrpc_net *rxnet)
diff --git a/net/rxrpc/protocol.h b/net/rxrpc/protocol.h
index 99ce322d7caa..49bb972539aa 100644
--- a/net/rxrpc/protocol.h
+++ b/net/rxrpc/protocol.h
@@ -89,6 +89,15 @@ struct rxrpc_jumbo_header {
89#define RXRPC_JUMBO_DATALEN 1412 /* non-terminal jumbo packet data length */ 89#define RXRPC_JUMBO_DATALEN 1412 /* non-terminal jumbo packet data length */
90#define RXRPC_JUMBO_SUBPKTLEN (RXRPC_JUMBO_DATALEN + sizeof(struct rxrpc_jumbo_header)) 90#define RXRPC_JUMBO_SUBPKTLEN (RXRPC_JUMBO_DATALEN + sizeof(struct rxrpc_jumbo_header))
91 91
92/*
93 * The maximum number of subpackets that can possibly fit in a UDP packet is:
94 *
95 * ((max_IP - IP_hdr - UDP_hdr) / RXRPC_JUMBO_SUBPKTLEN) + 1
96 * = ((65535 - 28 - 28) / 1416) + 1
97 * = 46 non-terminal packets and 1 terminal packet.
98 */
99#define RXRPC_MAX_NR_JUMBO 47
100
92/*****************************************************************************/ 101/*****************************************************************************/
93/* 102/*
94 * on-the-wire Rx ACK packet data payload 103 * on-the-wire Rx ACK packet data payload
diff --git a/net/rxrpc/recvmsg.c b/net/rxrpc/recvmsg.c
index 5abf46cf9e6c..3b0becb12041 100644
--- a/net/rxrpc/recvmsg.c
+++ b/net/rxrpc/recvmsg.c
@@ -141,7 +141,7 @@ static void rxrpc_end_rx_phase(struct rxrpc_call *call, rxrpc_serial_t serial)
141 ASSERTCMP(call->rx_hard_ack, ==, call->rx_top); 141 ASSERTCMP(call->rx_hard_ack, ==, call->rx_top);
142 142
143 if (call->state == RXRPC_CALL_CLIENT_RECV_REPLY) { 143 if (call->state == RXRPC_CALL_CLIENT_RECV_REPLY) {
144 rxrpc_propose_ACK(call, RXRPC_ACK_IDLE, 0, serial, false, true, 144 rxrpc_propose_ACK(call, RXRPC_ACK_IDLE, serial, false, true,
145 rxrpc_propose_ack_terminal_ack); 145 rxrpc_propose_ack_terminal_ack);
146 //rxrpc_send_ack_packet(call, false, NULL); 146 //rxrpc_send_ack_packet(call, false, NULL);
147 } 147 }
@@ -159,7 +159,7 @@ static void rxrpc_end_rx_phase(struct rxrpc_call *call, rxrpc_serial_t serial)
159 call->state = RXRPC_CALL_SERVER_ACK_REQUEST; 159 call->state = RXRPC_CALL_SERVER_ACK_REQUEST;
160 call->expect_req_by = jiffies + MAX_JIFFY_OFFSET; 160 call->expect_req_by = jiffies + MAX_JIFFY_OFFSET;
161 write_unlock_bh(&call->state_lock); 161 write_unlock_bh(&call->state_lock);
162 rxrpc_propose_ACK(call, RXRPC_ACK_DELAY, 0, serial, false, true, 162 rxrpc_propose_ACK(call, RXRPC_ACK_DELAY, serial, false, true,
163 rxrpc_propose_ack_processing_op); 163 rxrpc_propose_ack_processing_op);
164 break; 164 break;
165 default: 165 default:
@@ -177,7 +177,8 @@ static void rxrpc_rotate_rx_window(struct rxrpc_call *call)
177 struct sk_buff *skb; 177 struct sk_buff *skb;
178 rxrpc_serial_t serial; 178 rxrpc_serial_t serial;
179 rxrpc_seq_t hard_ack, top; 179 rxrpc_seq_t hard_ack, top;
180 u8 flags; 180 bool last = false;
181 u8 subpacket;
181 int ix; 182 int ix;
182 183
183 _enter("%d", call->debug_id); 184 _enter("%d", call->debug_id);
@@ -189,30 +190,32 @@ static void rxrpc_rotate_rx_window(struct rxrpc_call *call)
189 hard_ack++; 190 hard_ack++;
190 ix = hard_ack & RXRPC_RXTX_BUFF_MASK; 191 ix = hard_ack & RXRPC_RXTX_BUFF_MASK;
191 skb = call->rxtx_buffer[ix]; 192 skb = call->rxtx_buffer[ix];
192 rxrpc_see_skb(skb, rxrpc_skb_rx_rotated); 193 rxrpc_see_skb(skb, rxrpc_skb_rotated);
193 sp = rxrpc_skb(skb); 194 sp = rxrpc_skb(skb);
194 flags = sp->hdr.flags; 195
195 serial = sp->hdr.serial; 196 subpacket = call->rxtx_annotations[ix] & RXRPC_RX_ANNO_SUBPACKET;
196 if (call->rxtx_annotations[ix] & RXRPC_RX_ANNO_JUMBO) 197 serial = sp->hdr.serial + subpacket;
197 serial += (call->rxtx_annotations[ix] & RXRPC_RX_ANNO_JUMBO) - 1; 198
199 if (subpacket == sp->nr_subpackets - 1 &&
200 sp->rx_flags & RXRPC_SKB_INCL_LAST)
201 last = true;
198 202
199 call->rxtx_buffer[ix] = NULL; 203 call->rxtx_buffer[ix] = NULL;
200 call->rxtx_annotations[ix] = 0; 204 call->rxtx_annotations[ix] = 0;
201 /* Barrier against rxrpc_input_data(). */ 205 /* Barrier against rxrpc_input_data(). */
202 smp_store_release(&call->rx_hard_ack, hard_ack); 206 smp_store_release(&call->rx_hard_ack, hard_ack);
203 207
204 rxrpc_free_skb(skb, rxrpc_skb_rx_freed); 208 rxrpc_free_skb(skb, rxrpc_skb_freed);
205 209
206 _debug("%u,%u,%02x", hard_ack, top, flags);
207 trace_rxrpc_receive(call, rxrpc_receive_rotate, serial, hard_ack); 210 trace_rxrpc_receive(call, rxrpc_receive_rotate, serial, hard_ack);
208 if (flags & RXRPC_LAST_PACKET) { 211 if (last) {
209 rxrpc_end_rx_phase(call, serial); 212 rxrpc_end_rx_phase(call, serial);
210 } else { 213 } else {
211 /* Check to see if there's an ACK that needs sending. */ 214 /* Check to see if there's an ACK that needs sending. */
212 if (after_eq(hard_ack, call->ackr_consumed + 2) || 215 if (after_eq(hard_ack, call->ackr_consumed + 2) ||
213 after_eq(top, call->ackr_seen + 2) || 216 after_eq(top, call->ackr_seen + 2) ||
214 (hard_ack == top && after(hard_ack, call->ackr_consumed))) 217 (hard_ack == top && after(hard_ack, call->ackr_consumed)))
215 rxrpc_propose_ACK(call, RXRPC_ACK_DELAY, 0, serial, 218 rxrpc_propose_ACK(call, RXRPC_ACK_DELAY, serial,
216 true, true, 219 true, true,
217 rxrpc_propose_ack_rotate_rx); 220 rxrpc_propose_ack_rotate_rx);
218 if (call->ackr_reason && call->ackr_reason != RXRPC_ACK_DELAY) 221 if (call->ackr_reason && call->ackr_reason != RXRPC_ACK_DELAY)
@@ -233,18 +236,19 @@ static int rxrpc_verify_packet(struct rxrpc_call *call, struct sk_buff *skb,
233 struct rxrpc_skb_priv *sp = rxrpc_skb(skb); 236 struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
234 rxrpc_seq_t seq = sp->hdr.seq; 237 rxrpc_seq_t seq = sp->hdr.seq;
235 u16 cksum = sp->hdr.cksum; 238 u16 cksum = sp->hdr.cksum;
239 u8 subpacket = annotation & RXRPC_RX_ANNO_SUBPACKET;
236 240
237 _enter(""); 241 _enter("");
238 242
239 /* For all but the head jumbo subpacket, the security checksum is in a 243 /* For all but the head jumbo subpacket, the security checksum is in a
240 * jumbo header immediately prior to the data. 244 * jumbo header immediately prior to the data.
241 */ 245 */
242 if ((annotation & RXRPC_RX_ANNO_JUMBO) > 1) { 246 if (subpacket > 0) {
243 __be16 tmp; 247 __be16 tmp;
244 if (skb_copy_bits(skb, offset - 2, &tmp, 2) < 0) 248 if (skb_copy_bits(skb, offset - 2, &tmp, 2) < 0)
245 BUG(); 249 BUG();
246 cksum = ntohs(tmp); 250 cksum = ntohs(tmp);
247 seq += (annotation & RXRPC_RX_ANNO_JUMBO) - 1; 251 seq += subpacket;
248 } 252 }
249 253
250 return call->conn->security->verify_packet(call, skb, offset, len, 254 return call->conn->security->verify_packet(call, skb, offset, len,
@@ -265,19 +269,18 @@ static int rxrpc_locate_data(struct rxrpc_call *call, struct sk_buff *skb,
265 u8 *_annotation, 269 u8 *_annotation,
266 unsigned int *_offset, unsigned int *_len) 270 unsigned int *_offset, unsigned int *_len)
267{ 271{
272 struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
268 unsigned int offset = sizeof(struct rxrpc_wire_header); 273 unsigned int offset = sizeof(struct rxrpc_wire_header);
269 unsigned int len; 274 unsigned int len;
270 int ret; 275 int ret;
271 u8 annotation = *_annotation; 276 u8 annotation = *_annotation;
277 u8 subpacket = annotation & RXRPC_RX_ANNO_SUBPACKET;
272 278
273 /* Locate the subpacket */ 279 /* Locate the subpacket */
280 offset += subpacket * RXRPC_JUMBO_SUBPKTLEN;
274 len = skb->len - offset; 281 len = skb->len - offset;
275 if ((annotation & RXRPC_RX_ANNO_JUMBO) > 0) { 282 if (subpacket < sp->nr_subpackets - 1)
276 offset += (((annotation & RXRPC_RX_ANNO_JUMBO) - 1) * 283 len = RXRPC_JUMBO_DATALEN;
277 RXRPC_JUMBO_SUBPKTLEN);
278 len = (annotation & RXRPC_RX_ANNO_JLAST) ?
279 skb->len - offset : RXRPC_JUMBO_SUBPKTLEN;
280 }
281 284
282 if (!(annotation & RXRPC_RX_ANNO_VERIFIED)) { 285 if (!(annotation & RXRPC_RX_ANNO_VERIFIED)) {
283 ret = rxrpc_verify_packet(call, skb, annotation, offset, len); 286 ret = rxrpc_verify_packet(call, skb, annotation, offset, len);
@@ -303,6 +306,7 @@ static int rxrpc_recvmsg_data(struct socket *sock, struct rxrpc_call *call,
303{ 306{
304 struct rxrpc_skb_priv *sp; 307 struct rxrpc_skb_priv *sp;
305 struct sk_buff *skb; 308 struct sk_buff *skb;
309 rxrpc_serial_t serial;
306 rxrpc_seq_t hard_ack, top, seq; 310 rxrpc_seq_t hard_ack, top, seq;
307 size_t remain; 311 size_t remain;
308 bool last; 312 bool last;
@@ -336,12 +340,15 @@ static int rxrpc_recvmsg_data(struct socket *sock, struct rxrpc_call *call,
336 break; 340 break;
337 } 341 }
338 smp_rmb(); 342 smp_rmb();
339 rxrpc_see_skb(skb, rxrpc_skb_rx_seen); 343 rxrpc_see_skb(skb, rxrpc_skb_seen);
340 sp = rxrpc_skb(skb); 344 sp = rxrpc_skb(skb);
341 345
342 if (!(flags & MSG_PEEK)) 346 if (!(flags & MSG_PEEK)) {
347 serial = sp->hdr.serial;
348 serial += call->rxtx_annotations[ix] & RXRPC_RX_ANNO_SUBPACKET;
343 trace_rxrpc_receive(call, rxrpc_receive_front, 349 trace_rxrpc_receive(call, rxrpc_receive_front,
344 sp->hdr.serial, seq); 350 serial, seq);
351 }
345 352
346 if (msg) 353 if (msg)
347 sock_recv_timestamp(msg, sock->sk, skb); 354 sock_recv_timestamp(msg, sock->sk, skb);
diff --git a/net/rxrpc/rxkad.c b/net/rxrpc/rxkad.c
index ae8cd8926456..c60c520fde7c 100644
--- a/net/rxrpc/rxkad.c
+++ b/net/rxrpc/rxkad.c
@@ -187,10 +187,8 @@ static int rxkad_secure_packet_encrypt(const struct rxrpc_call *call,
187 struct rxrpc_skb_priv *sp; 187 struct rxrpc_skb_priv *sp;
188 struct rxrpc_crypt iv; 188 struct rxrpc_crypt iv;
189 struct scatterlist sg[16]; 189 struct scatterlist sg[16];
190 struct sk_buff *trailer;
191 unsigned int len; 190 unsigned int len;
192 u16 check; 191 u16 check;
193 int nsg;
194 int err; 192 int err;
195 193
196 sp = rxrpc_skb(skb); 194 sp = rxrpc_skb(skb);
@@ -214,15 +212,14 @@ static int rxkad_secure_packet_encrypt(const struct rxrpc_call *call,
214 crypto_skcipher_encrypt(req); 212 crypto_skcipher_encrypt(req);
215 213
216 /* we want to encrypt the skbuff in-place */ 214 /* we want to encrypt the skbuff in-place */
217 nsg = skb_cow_data(skb, 0, &trailer); 215 err = -EMSGSIZE;
218 err = -ENOMEM; 216 if (skb_shinfo(skb)->nr_frags > 16)
219 if (nsg < 0 || nsg > 16)
220 goto out; 217 goto out;
221 218
222 len = data_size + call->conn->size_align - 1; 219 len = data_size + call->conn->size_align - 1;
223 len &= ~(call->conn->size_align - 1); 220 len &= ~(call->conn->size_align - 1);
224 221
225 sg_init_table(sg, nsg); 222 sg_init_table(sg, ARRAY_SIZE(sg));
226 err = skb_to_sgvec(skb, sg, 0, len); 223 err = skb_to_sgvec(skb, sg, 0, len);
227 if (unlikely(err < 0)) 224 if (unlikely(err < 0))
228 goto out; 225 goto out;
@@ -319,11 +316,10 @@ static int rxkad_verify_packet_1(struct rxrpc_call *call, struct sk_buff *skb,
319 struct rxkad_level1_hdr sechdr; 316 struct rxkad_level1_hdr sechdr;
320 struct rxrpc_crypt iv; 317 struct rxrpc_crypt iv;
321 struct scatterlist sg[16]; 318 struct scatterlist sg[16];
322 struct sk_buff *trailer;
323 bool aborted; 319 bool aborted;
324 u32 data_size, buf; 320 u32 data_size, buf;
325 u16 check; 321 u16 check;
326 int nsg, ret; 322 int ret;
327 323
328 _enter(""); 324 _enter("");
329 325
@@ -336,11 +332,7 @@ static int rxkad_verify_packet_1(struct rxrpc_call *call, struct sk_buff *skb,
336 /* Decrypt the skbuff in-place. TODO: We really want to decrypt 332 /* Decrypt the skbuff in-place. TODO: We really want to decrypt
337 * directly into the target buffer. 333 * directly into the target buffer.
338 */ 334 */
339 nsg = skb_cow_data(skb, 0, &trailer); 335 sg_init_table(sg, ARRAY_SIZE(sg));
340 if (nsg < 0 || nsg > 16)
341 goto nomem;
342
343 sg_init_table(sg, nsg);
344 ret = skb_to_sgvec(skb, sg, offset, 8); 336 ret = skb_to_sgvec(skb, sg, offset, 8);
345 if (unlikely(ret < 0)) 337 if (unlikely(ret < 0))
346 return ret; 338 return ret;
@@ -388,10 +380,6 @@ protocol_error:
388 if (aborted) 380 if (aborted)
389 rxrpc_send_abort_packet(call); 381 rxrpc_send_abort_packet(call);
390 return -EPROTO; 382 return -EPROTO;
391
392nomem:
393 _leave(" = -ENOMEM");
394 return -ENOMEM;
395} 383}
396 384
397/* 385/*
@@ -406,7 +394,6 @@ static int rxkad_verify_packet_2(struct rxrpc_call *call, struct sk_buff *skb,
406 struct rxkad_level2_hdr sechdr; 394 struct rxkad_level2_hdr sechdr;
407 struct rxrpc_crypt iv; 395 struct rxrpc_crypt iv;
408 struct scatterlist _sg[4], *sg; 396 struct scatterlist _sg[4], *sg;
409 struct sk_buff *trailer;
410 bool aborted; 397 bool aborted;
411 u32 data_size, buf; 398 u32 data_size, buf;
412 u16 check; 399 u16 check;
@@ -423,12 +410,11 @@ static int rxkad_verify_packet_2(struct rxrpc_call *call, struct sk_buff *skb,
423 /* Decrypt the skbuff in-place. TODO: We really want to decrypt 410 /* Decrypt the skbuff in-place. TODO: We really want to decrypt
424 * directly into the target buffer. 411 * directly into the target buffer.
425 */ 412 */
426 nsg = skb_cow_data(skb, 0, &trailer);
427 if (nsg < 0)
428 goto nomem;
429
430 sg = _sg; 413 sg = _sg;
431 if (unlikely(nsg > 4)) { 414 nsg = skb_shinfo(skb)->nr_frags;
415 if (nsg <= 4) {
416 nsg = 4;
417 } else {
432 sg = kmalloc_array(nsg, sizeof(*sg), GFP_NOIO); 418 sg = kmalloc_array(nsg, sizeof(*sg), GFP_NOIO);
433 if (!sg) 419 if (!sg)
434 goto nomem; 420 goto nomem;
diff --git a/net/rxrpc/sendmsg.c b/net/rxrpc/sendmsg.c
index 5d3f33ce6d41..6a1547b270fe 100644
--- a/net/rxrpc/sendmsg.c
+++ b/net/rxrpc/sendmsg.c
@@ -176,7 +176,7 @@ static int rxrpc_queue_packet(struct rxrpc_sock *rx, struct rxrpc_call *call,
176 skb->tstamp = ktime_get_real(); 176 skb->tstamp = ktime_get_real();
177 177
178 ix = seq & RXRPC_RXTX_BUFF_MASK; 178 ix = seq & RXRPC_RXTX_BUFF_MASK;
179 rxrpc_get_skb(skb, rxrpc_skb_tx_got); 179 rxrpc_get_skb(skb, rxrpc_skb_got);
180 call->rxtx_annotations[ix] = annotation; 180 call->rxtx_annotations[ix] = annotation;
181 smp_wmb(); 181 smp_wmb();
182 call->rxtx_buffer[ix] = skb; 182 call->rxtx_buffer[ix] = skb;
@@ -226,6 +226,7 @@ static int rxrpc_queue_packet(struct rxrpc_sock *rx, struct rxrpc_call *call,
226 rxrpc_set_call_completion(call, 226 rxrpc_set_call_completion(call,
227 RXRPC_CALL_LOCAL_ERROR, 227 RXRPC_CALL_LOCAL_ERROR,
228 0, ret); 228 0, ret);
229 rxrpc_notify_socket(call);
229 goto out; 230 goto out;
230 } 231 }
231 _debug("need instant resend %d", ret); 232 _debug("need instant resend %d", ret);
@@ -247,7 +248,7 @@ static int rxrpc_queue_packet(struct rxrpc_sock *rx, struct rxrpc_call *call,
247 } 248 }
248 249
249out: 250out:
250 rxrpc_free_skb(skb, rxrpc_skb_tx_freed); 251 rxrpc_free_skb(skb, rxrpc_skb_freed);
251 _leave(" = %d", ret); 252 _leave(" = %d", ret);
252 return ret; 253 return ret;
253} 254}
@@ -288,7 +289,7 @@ static int rxrpc_send_data(struct rxrpc_sock *rx,
288 289
289 skb = call->tx_pending; 290 skb = call->tx_pending;
290 call->tx_pending = NULL; 291 call->tx_pending = NULL;
291 rxrpc_see_skb(skb, rxrpc_skb_tx_seen); 292 rxrpc_see_skb(skb, rxrpc_skb_seen);
292 293
293 copied = 0; 294 copied = 0;
294 do { 295 do {
@@ -335,7 +336,9 @@ static int rxrpc_send_data(struct rxrpc_sock *rx,
335 if (!skb) 336 if (!skb)
336 goto maybe_error; 337 goto maybe_error;
337 338
338 rxrpc_new_skb(skb, rxrpc_skb_tx_new); 339 sp = rxrpc_skb(skb);
340 sp->rx_flags |= RXRPC_SKB_TX_BUFFER;
341 rxrpc_new_skb(skb, rxrpc_skb_new);
339 342
340 _debug("ALLOC SEND %p", skb); 343 _debug("ALLOC SEND %p", skb);
341 344
@@ -345,7 +348,6 @@ static int rxrpc_send_data(struct rxrpc_sock *rx,
345 skb_reserve(skb, call->conn->security_size); 348 skb_reserve(skb, call->conn->security_size);
346 skb->len += call->conn->security_size; 349 skb->len += call->conn->security_size;
347 350
348 sp = rxrpc_skb(skb);
349 sp->remain = chunk; 351 sp->remain = chunk;
350 if (sp->remain > skb_tailroom(skb)) 352 if (sp->remain > skb_tailroom(skb))
351 sp->remain = skb_tailroom(skb); 353 sp->remain = skb_tailroom(skb);
@@ -438,7 +440,7 @@ out:
438 return ret; 440 return ret;
439 441
440call_terminated: 442call_terminated:
441 rxrpc_free_skb(skb, rxrpc_skb_tx_freed); 443 rxrpc_free_skb(skb, rxrpc_skb_freed);
442 _leave(" = %d", call->error); 444 _leave(" = %d", call->error);
443 return call->error; 445 return call->error;
444 446
diff --git a/net/rxrpc/skbuff.c b/net/rxrpc/skbuff.c
index 9ad5045b7c2f..0348d2bf6f7d 100644
--- a/net/rxrpc/skbuff.c
+++ b/net/rxrpc/skbuff.c
@@ -14,7 +14,8 @@
14#include <net/af_rxrpc.h> 14#include <net/af_rxrpc.h>
15#include "ar-internal.h" 15#include "ar-internal.h"
16 16
17#define select_skb_count(op) (op >= rxrpc_skb_tx_cleaned ? &rxrpc_n_tx_skbs : &rxrpc_n_rx_skbs) 17#define is_tx_skb(skb) (rxrpc_skb(skb)->rx_flags & RXRPC_SKB_TX_BUFFER)
18#define select_skb_count(skb) (is_tx_skb(skb) ? &rxrpc_n_tx_skbs : &rxrpc_n_rx_skbs)
18 19
19/* 20/*
20 * Note the allocation or reception of a socket buffer. 21 * Note the allocation or reception of a socket buffer.
@@ -22,8 +23,9 @@
22void rxrpc_new_skb(struct sk_buff *skb, enum rxrpc_skb_trace op) 23void rxrpc_new_skb(struct sk_buff *skb, enum rxrpc_skb_trace op)
23{ 24{
24 const void *here = __builtin_return_address(0); 25 const void *here = __builtin_return_address(0);
25 int n = atomic_inc_return(select_skb_count(op)); 26 int n = atomic_inc_return(select_skb_count(skb));
26 trace_rxrpc_skb(skb, op, refcount_read(&skb->users), n, here); 27 trace_rxrpc_skb(skb, op, refcount_read(&skb->users), n,
28 rxrpc_skb(skb)->rx_flags, here);
27} 29}
28 30
29/* 31/*
@@ -33,8 +35,9 @@ void rxrpc_see_skb(struct sk_buff *skb, enum rxrpc_skb_trace op)
33{ 35{
34 const void *here = __builtin_return_address(0); 36 const void *here = __builtin_return_address(0);
35 if (skb) { 37 if (skb) {
36 int n = atomic_read(select_skb_count(op)); 38 int n = atomic_read(select_skb_count(skb));
37 trace_rxrpc_skb(skb, op, refcount_read(&skb->users), n, here); 39 trace_rxrpc_skb(skb, op, refcount_read(&skb->users), n,
40 rxrpc_skb(skb)->rx_flags, here);
38 } 41 }
39} 42}
40 43
@@ -44,12 +47,23 @@ void rxrpc_see_skb(struct sk_buff *skb, enum rxrpc_skb_trace op)
44void rxrpc_get_skb(struct sk_buff *skb, enum rxrpc_skb_trace op) 47void rxrpc_get_skb(struct sk_buff *skb, enum rxrpc_skb_trace op)
45{ 48{
46 const void *here = __builtin_return_address(0); 49 const void *here = __builtin_return_address(0);
47 int n = atomic_inc_return(select_skb_count(op)); 50 int n = atomic_inc_return(select_skb_count(skb));
48 trace_rxrpc_skb(skb, op, refcount_read(&skb->users), n, here); 51 trace_rxrpc_skb(skb, op, refcount_read(&skb->users), n,
52 rxrpc_skb(skb)->rx_flags, here);
49 skb_get(skb); 53 skb_get(skb);
50} 54}
51 55
52/* 56/*
57 * Note the dropping of a ref on a socket buffer by the core.
58 */
59void rxrpc_eaten_skb(struct sk_buff *skb, enum rxrpc_skb_trace op)
60{
61 const void *here = __builtin_return_address(0);
62 int n = atomic_inc_return(&rxrpc_n_rx_skbs);
63 trace_rxrpc_skb(skb, op, 0, n, 0, here);
64}
65
66/*
53 * Note the destruction of a socket buffer. 67 * Note the destruction of a socket buffer.
54 */ 68 */
55void rxrpc_free_skb(struct sk_buff *skb, enum rxrpc_skb_trace op) 69void rxrpc_free_skb(struct sk_buff *skb, enum rxrpc_skb_trace op)
@@ -58,8 +72,9 @@ void rxrpc_free_skb(struct sk_buff *skb, enum rxrpc_skb_trace op)
58 if (skb) { 72 if (skb) {
59 int n; 73 int n;
60 CHECK_SLAB_OKAY(&skb->users); 74 CHECK_SLAB_OKAY(&skb->users);
61 n = atomic_dec_return(select_skb_count(op)); 75 n = atomic_dec_return(select_skb_count(skb));
62 trace_rxrpc_skb(skb, op, refcount_read(&skb->users), n, here); 76 trace_rxrpc_skb(skb, op, refcount_read(&skb->users), n,
77 rxrpc_skb(skb)->rx_flags, here);
63 kfree_skb(skb); 78 kfree_skb(skb);
64 } 79 }
65} 80}
@@ -72,9 +87,10 @@ void rxrpc_purge_queue(struct sk_buff_head *list)
72 const void *here = __builtin_return_address(0); 87 const void *here = __builtin_return_address(0);
73 struct sk_buff *skb; 88 struct sk_buff *skb;
74 while ((skb = skb_dequeue((list))) != NULL) { 89 while ((skb = skb_dequeue((list))) != NULL) {
75 int n = atomic_dec_return(select_skb_count(rxrpc_skb_rx_purged)); 90 int n = atomic_dec_return(select_skb_count(skb));
76 trace_rxrpc_skb(skb, rxrpc_skb_rx_purged, 91 trace_rxrpc_skb(skb, rxrpc_skb_purged,
77 refcount_read(&skb->users), n, here); 92 refcount_read(&skb->users), n,
93 rxrpc_skb(skb)->rx_flags, here);
78 kfree_skb(skb); 94 kfree_skb(skb);
79 } 95 }
80} 96}
diff --git a/net/sched/act_bpf.c b/net/sched/act_bpf.c
index 8126b26f125e..04b7bd4ec751 100644
--- a/net/sched/act_bpf.c
+++ b/net/sched/act_bpf.c
@@ -285,6 +285,7 @@ static int tcf_bpf_init(struct net *net, struct nlattr *nla,
285 struct tcf_bpf *prog; 285 struct tcf_bpf *prog;
286 bool is_bpf, is_ebpf; 286 bool is_bpf, is_ebpf;
287 int ret, res = 0; 287 int ret, res = 0;
288 u32 index;
288 289
289 if (!nla) 290 if (!nla)
290 return -EINVAL; 291 return -EINVAL;
@@ -298,13 +299,13 @@ static int tcf_bpf_init(struct net *net, struct nlattr *nla,
298 return -EINVAL; 299 return -EINVAL;
299 300
300 parm = nla_data(tb[TCA_ACT_BPF_PARMS]); 301 parm = nla_data(tb[TCA_ACT_BPF_PARMS]);
301 302 index = parm->index;
302 ret = tcf_idr_check_alloc(tn, &parm->index, act, bind); 303 ret = tcf_idr_check_alloc(tn, &index, act, bind);
303 if (!ret) { 304 if (!ret) {
304 ret = tcf_idr_create(tn, parm->index, est, act, 305 ret = tcf_idr_create(tn, index, est, act,
305 &act_bpf_ops, bind, true); 306 &act_bpf_ops, bind, true);
306 if (ret < 0) { 307 if (ret < 0) {
307 tcf_idr_cleanup(tn, parm->index); 308 tcf_idr_cleanup(tn, index);
308 return ret; 309 return ret;
309 } 310 }
310 311
@@ -421,7 +422,7 @@ static __net_init int bpf_init_net(struct net *net)
421{ 422{
422 struct tc_action_net *tn = net_generic(net, bpf_net_id); 423 struct tc_action_net *tn = net_generic(net, bpf_net_id);
423 424
424 return tc_action_net_init(tn, &act_bpf_ops); 425 return tc_action_net_init(net, tn, &act_bpf_ops);
425} 426}
426 427
427static void __net_exit bpf_exit_net(struct list_head *net_list) 428static void __net_exit bpf_exit_net(struct list_head *net_list)
diff --git a/net/sched/act_connmark.c b/net/sched/act_connmark.c
index ce36b0f7e1dc..2b43cacf82af 100644
--- a/net/sched/act_connmark.c
+++ b/net/sched/act_connmark.c
@@ -103,6 +103,7 @@ static int tcf_connmark_init(struct net *net, struct nlattr *nla,
103 struct tcf_connmark_info *ci; 103 struct tcf_connmark_info *ci;
104 struct tc_connmark *parm; 104 struct tc_connmark *parm;
105 int ret = 0, err; 105 int ret = 0, err;
106 u32 index;
106 107
107 if (!nla) 108 if (!nla)
108 return -EINVAL; 109 return -EINVAL;
@@ -116,13 +117,13 @@ static int tcf_connmark_init(struct net *net, struct nlattr *nla,
116 return -EINVAL; 117 return -EINVAL;
117 118
118 parm = nla_data(tb[TCA_CONNMARK_PARMS]); 119 parm = nla_data(tb[TCA_CONNMARK_PARMS]);
119 120 index = parm->index;
120 ret = tcf_idr_check_alloc(tn, &parm->index, a, bind); 121 ret = tcf_idr_check_alloc(tn, &index, a, bind);
121 if (!ret) { 122 if (!ret) {
122 ret = tcf_idr_create(tn, parm->index, est, a, 123 ret = tcf_idr_create(tn, index, est, a,
123 &act_connmark_ops, bind, false); 124 &act_connmark_ops, bind, false);
124 if (ret) { 125 if (ret) {
125 tcf_idr_cleanup(tn, parm->index); 126 tcf_idr_cleanup(tn, index);
126 return ret; 127 return ret;
127 } 128 }
128 129
@@ -230,7 +231,7 @@ static __net_init int connmark_init_net(struct net *net)
230{ 231{
231 struct tc_action_net *tn = net_generic(net, connmark_net_id); 232 struct tc_action_net *tn = net_generic(net, connmark_net_id);
232 233
233 return tc_action_net_init(tn, &act_connmark_ops); 234 return tc_action_net_init(net, tn, &act_connmark_ops);
234} 235}
235 236
236static void __net_exit connmark_exit_net(struct list_head *net_list) 237static void __net_exit connmark_exit_net(struct list_head *net_list)
diff --git a/net/sched/act_csum.c b/net/sched/act_csum.c
index 621fb22ce2a9..d3cfad88dc3a 100644
--- a/net/sched/act_csum.c
+++ b/net/sched/act_csum.c
@@ -52,6 +52,7 @@ static int tcf_csum_init(struct net *net, struct nlattr *nla,
52 struct tc_csum *parm; 52 struct tc_csum *parm;
53 struct tcf_csum *p; 53 struct tcf_csum *p;
54 int ret = 0, err; 54 int ret = 0, err;
55 u32 index;
55 56
56 if (nla == NULL) 57 if (nla == NULL)
57 return -EINVAL; 58 return -EINVAL;
@@ -64,13 +65,13 @@ static int tcf_csum_init(struct net *net, struct nlattr *nla,
64 if (tb[TCA_CSUM_PARMS] == NULL) 65 if (tb[TCA_CSUM_PARMS] == NULL)
65 return -EINVAL; 66 return -EINVAL;
66 parm = nla_data(tb[TCA_CSUM_PARMS]); 67 parm = nla_data(tb[TCA_CSUM_PARMS]);
67 68 index = parm->index;
68 err = tcf_idr_check_alloc(tn, &parm->index, a, bind); 69 err = tcf_idr_check_alloc(tn, &index, a, bind);
69 if (!err) { 70 if (!err) {
70 ret = tcf_idr_create(tn, parm->index, est, a, 71 ret = tcf_idr_create(tn, index, est, a,
71 &act_csum_ops, bind, true); 72 &act_csum_ops, bind, true);
72 if (ret) { 73 if (ret) {
73 tcf_idr_cleanup(tn, parm->index); 74 tcf_idr_cleanup(tn, index);
74 return ret; 75 return ret;
75 } 76 }
76 ret = ACT_P_CREATED; 77 ret = ACT_P_CREATED;
@@ -713,7 +714,7 @@ static __net_init int csum_init_net(struct net *net)
713{ 714{
714 struct tc_action_net *tn = net_generic(net, csum_net_id); 715 struct tc_action_net *tn = net_generic(net, csum_net_id);
715 716
716 return tc_action_net_init(tn, &act_csum_ops); 717 return tc_action_net_init(net, tn, &act_csum_ops);
717} 718}
718 719
719static void __net_exit csum_exit_net(struct list_head *net_list) 720static void __net_exit csum_exit_net(struct list_head *net_list)
diff --git a/net/sched/act_ct.c b/net/sched/act_ct.c
index b501ce0cf116..cdd6f3818097 100644
--- a/net/sched/act_ct.c
+++ b/net/sched/act_ct.c
@@ -666,6 +666,7 @@ static int tcf_ct_init(struct net *net, struct nlattr *nla,
666 struct tc_ct *parm; 666 struct tc_ct *parm;
667 struct tcf_ct *c; 667 struct tcf_ct *c;
668 int err, res = 0; 668 int err, res = 0;
669 u32 index;
669 670
670 if (!nla) { 671 if (!nla) {
671 NL_SET_ERR_MSG_MOD(extack, "Ct requires attributes to be passed"); 672 NL_SET_ERR_MSG_MOD(extack, "Ct requires attributes to be passed");
@@ -681,16 +682,16 @@ static int tcf_ct_init(struct net *net, struct nlattr *nla,
681 return -EINVAL; 682 return -EINVAL;
682 } 683 }
683 parm = nla_data(tb[TCA_CT_PARMS]); 684 parm = nla_data(tb[TCA_CT_PARMS]);
684 685 index = parm->index;
685 err = tcf_idr_check_alloc(tn, &parm->index, a, bind); 686 err = tcf_idr_check_alloc(tn, &index, a, bind);
686 if (err < 0) 687 if (err < 0)
687 return err; 688 return err;
688 689
689 if (!err) { 690 if (!err) {
690 err = tcf_idr_create(tn, parm->index, est, a, 691 err = tcf_idr_create(tn, index, est, a,
691 &act_ct_ops, bind, true); 692 &act_ct_ops, bind, true);
692 if (err) { 693 if (err) {
693 tcf_idr_cleanup(tn, parm->index); 694 tcf_idr_cleanup(tn, index);
694 return err; 695 return err;
695 } 696 }
696 res = ACT_P_CREATED; 697 res = ACT_P_CREATED;
@@ -938,7 +939,7 @@ static __net_init int ct_init_net(struct net *net)
938 tn->labels = true; 939 tn->labels = true;
939 } 940 }
940 941
941 return tc_action_net_init(&tn->tn, &act_ct_ops); 942 return tc_action_net_init(net, &tn->tn, &act_ct_ops);
942} 943}
943 944
944static void __net_exit ct_exit_net(struct list_head *net_list) 945static void __net_exit ct_exit_net(struct list_head *net_list)
diff --git a/net/sched/act_ctinfo.c b/net/sched/act_ctinfo.c
index 10eb2bb99861..0dbcfd1dca7b 100644
--- a/net/sched/act_ctinfo.c
+++ b/net/sched/act_ctinfo.c
@@ -157,10 +157,10 @@ static int tcf_ctinfo_init(struct net *net, struct nlattr *nla,
157 struct netlink_ext_ack *extack) 157 struct netlink_ext_ack *extack)
158{ 158{
159 struct tc_action_net *tn = net_generic(net, ctinfo_net_id); 159 struct tc_action_net *tn = net_generic(net, ctinfo_net_id);
160 u32 dscpmask = 0, dscpstatemask, index;
160 struct nlattr *tb[TCA_CTINFO_MAX + 1]; 161 struct nlattr *tb[TCA_CTINFO_MAX + 1];
161 struct tcf_ctinfo_params *cp_new; 162 struct tcf_ctinfo_params *cp_new;
162 struct tcf_chain *goto_ch = NULL; 163 struct tcf_chain *goto_ch = NULL;
163 u32 dscpmask = 0, dscpstatemask;
164 struct tc_ctinfo *actparm; 164 struct tc_ctinfo *actparm;
165 struct tcf_ctinfo *ci; 165 struct tcf_ctinfo *ci;
166 u8 dscpmaskshift; 166 u8 dscpmaskshift;
@@ -206,12 +206,13 @@ static int tcf_ctinfo_init(struct net *net, struct nlattr *nla,
206 } 206 }
207 207
208 /* done the validation:now to the actual action allocation */ 208 /* done the validation:now to the actual action allocation */
209 err = tcf_idr_check_alloc(tn, &actparm->index, a, bind); 209 index = actparm->index;
210 err = tcf_idr_check_alloc(tn, &index, a, bind);
210 if (!err) { 211 if (!err) {
211 ret = tcf_idr_create(tn, actparm->index, est, a, 212 ret = tcf_idr_create(tn, index, est, a,
212 &act_ctinfo_ops, bind, false); 213 &act_ctinfo_ops, bind, false);
213 if (ret) { 214 if (ret) {
214 tcf_idr_cleanup(tn, actparm->index); 215 tcf_idr_cleanup(tn, index);
215 return ret; 216 return ret;
216 } 217 }
217 ret = ACT_P_CREATED; 218 ret = ACT_P_CREATED;
@@ -375,7 +376,7 @@ static __net_init int ctinfo_init_net(struct net *net)
375{ 376{
376 struct tc_action_net *tn = net_generic(net, ctinfo_net_id); 377 struct tc_action_net *tn = net_generic(net, ctinfo_net_id);
377 378
378 return tc_action_net_init(tn, &act_ctinfo_ops); 379 return tc_action_net_init(net, tn, &act_ctinfo_ops);
379} 380}
380 381
381static void __net_exit ctinfo_exit_net(struct list_head *net_list) 382static void __net_exit ctinfo_exit_net(struct list_head *net_list)
diff --git a/net/sched/act_gact.c b/net/sched/act_gact.c
index b2380c5284e6..324f1d1f6d47 100644
--- a/net/sched/act_gact.c
+++ b/net/sched/act_gact.c
@@ -61,6 +61,7 @@ static int tcf_gact_init(struct net *net, struct nlattr *nla,
61 struct tc_gact *parm; 61 struct tc_gact *parm;
62 struct tcf_gact *gact; 62 struct tcf_gact *gact;
63 int ret = 0; 63 int ret = 0;
64 u32 index;
64 int err; 65 int err;
65#ifdef CONFIG_GACT_PROB 66#ifdef CONFIG_GACT_PROB
66 struct tc_gact_p *p_parm = NULL; 67 struct tc_gact_p *p_parm = NULL;
@@ -77,6 +78,7 @@ static int tcf_gact_init(struct net *net, struct nlattr *nla,
77 if (tb[TCA_GACT_PARMS] == NULL) 78 if (tb[TCA_GACT_PARMS] == NULL)
78 return -EINVAL; 79 return -EINVAL;
79 parm = nla_data(tb[TCA_GACT_PARMS]); 80 parm = nla_data(tb[TCA_GACT_PARMS]);
81 index = parm->index;
80 82
81#ifndef CONFIG_GACT_PROB 83#ifndef CONFIG_GACT_PROB
82 if (tb[TCA_GACT_PROB] != NULL) 84 if (tb[TCA_GACT_PROB] != NULL)
@@ -94,12 +96,12 @@ static int tcf_gact_init(struct net *net, struct nlattr *nla,
94 } 96 }
95#endif 97#endif
96 98
97 err = tcf_idr_check_alloc(tn, &parm->index, a, bind); 99 err = tcf_idr_check_alloc(tn, &index, a, bind);
98 if (!err) { 100 if (!err) {
99 ret = tcf_idr_create(tn, parm->index, est, a, 101 ret = tcf_idr_create(tn, index, est, a,
100 &act_gact_ops, bind, true); 102 &act_gact_ops, bind, true);
101 if (ret) { 103 if (ret) {
102 tcf_idr_cleanup(tn, parm->index); 104 tcf_idr_cleanup(tn, index);
103 return ret; 105 return ret;
104 } 106 }
105 ret = ACT_P_CREATED; 107 ret = ACT_P_CREATED;
@@ -276,7 +278,7 @@ static __net_init int gact_init_net(struct net *net)
276{ 278{
277 struct tc_action_net *tn = net_generic(net, gact_net_id); 279 struct tc_action_net *tn = net_generic(net, gact_net_id);
278 280
279 return tc_action_net_init(tn, &act_gact_ops); 281 return tc_action_net_init(net, tn, &act_gact_ops);
280} 282}
281 283
282static void __net_exit gact_exit_net(struct list_head *net_list) 284static void __net_exit gact_exit_net(struct list_head *net_list)
diff --git a/net/sched/act_ife.c b/net/sched/act_ife.c
index 41d5398dd2f2..3a31e241c647 100644
--- a/net/sched/act_ife.c
+++ b/net/sched/act_ife.c
@@ -479,8 +479,14 @@ static int tcf_ife_init(struct net *net, struct nlattr *nla,
479 u8 *saddr = NULL; 479 u8 *saddr = NULL;
480 bool exists = false; 480 bool exists = false;
481 int ret = 0; 481 int ret = 0;
482 u32 index;
482 int err; 483 int err;
483 484
485 if (!nla) {
486 NL_SET_ERR_MSG_MOD(extack, "IFE requires attributes to be passed");
487 return -EINVAL;
488 }
489
484 err = nla_parse_nested_deprecated(tb, TCA_IFE_MAX, nla, ife_policy, 490 err = nla_parse_nested_deprecated(tb, TCA_IFE_MAX, nla, ife_policy,
485 NULL); 491 NULL);
486 if (err < 0) 492 if (err < 0)
@@ -502,7 +508,8 @@ static int tcf_ife_init(struct net *net, struct nlattr *nla,
502 if (!p) 508 if (!p)
503 return -ENOMEM; 509 return -ENOMEM;
504 510
505 err = tcf_idr_check_alloc(tn, &parm->index, a, bind); 511 index = parm->index;
512 err = tcf_idr_check_alloc(tn, &index, a, bind);
506 if (err < 0) { 513 if (err < 0) {
507 kfree(p); 514 kfree(p);
508 return err; 515 return err;
@@ -514,10 +521,10 @@ static int tcf_ife_init(struct net *net, struct nlattr *nla,
514 } 521 }
515 522
516 if (!exists) { 523 if (!exists) {
517 ret = tcf_idr_create(tn, parm->index, est, a, &act_ife_ops, 524 ret = tcf_idr_create(tn, index, est, a, &act_ife_ops,
518 bind, true); 525 bind, true);
519 if (ret) { 526 if (ret) {
520 tcf_idr_cleanup(tn, parm->index); 527 tcf_idr_cleanup(tn, index);
521 kfree(p); 528 kfree(p);
522 return ret; 529 return ret;
523 } 530 }
@@ -883,7 +890,7 @@ static __net_init int ife_init_net(struct net *net)
883{ 890{
884 struct tc_action_net *tn = net_generic(net, ife_net_id); 891 struct tc_action_net *tn = net_generic(net, ife_net_id);
885 892
886 return tc_action_net_init(tn, &act_ife_ops); 893 return tc_action_net_init(net, tn, &act_ife_ops);
887} 894}
888 895
889static void __net_exit ife_exit_net(struct list_head *net_list) 896static void __net_exit ife_exit_net(struct list_head *net_list)
diff --git a/net/sched/act_ipt.c b/net/sched/act_ipt.c
index ce2c30a591d2..214a03d405cf 100644
--- a/net/sched/act_ipt.c
+++ b/net/sched/act_ipt.c
@@ -61,12 +61,13 @@ static int ipt_init_target(struct net *net, struct xt_entry_target *t,
61 return 0; 61 return 0;
62} 62}
63 63
64static void ipt_destroy_target(struct xt_entry_target *t) 64static void ipt_destroy_target(struct xt_entry_target *t, struct net *net)
65{ 65{
66 struct xt_tgdtor_param par = { 66 struct xt_tgdtor_param par = {
67 .target = t->u.kernel.target, 67 .target = t->u.kernel.target,
68 .targinfo = t->data, 68 .targinfo = t->data,
69 .family = NFPROTO_IPV4, 69 .family = NFPROTO_IPV4,
70 .net = net,
70 }; 71 };
71 if (par.target->destroy != NULL) 72 if (par.target->destroy != NULL)
72 par.target->destroy(&par); 73 par.target->destroy(&par);
@@ -78,7 +79,7 @@ static void tcf_ipt_release(struct tc_action *a)
78 struct tcf_ipt *ipt = to_ipt(a); 79 struct tcf_ipt *ipt = to_ipt(a);
79 80
80 if (ipt->tcfi_t) { 81 if (ipt->tcfi_t) {
81 ipt_destroy_target(ipt->tcfi_t); 82 ipt_destroy_target(ipt->tcfi_t, a->idrinfo->net);
82 kfree(ipt->tcfi_t); 83 kfree(ipt->tcfi_t);
83 } 84 }
84 kfree(ipt->tcfi_tname); 85 kfree(ipt->tcfi_tname);
@@ -180,7 +181,7 @@ static int __tcf_ipt_init(struct net *net, unsigned int id, struct nlattr *nla,
180 181
181 spin_lock_bh(&ipt->tcf_lock); 182 spin_lock_bh(&ipt->tcf_lock);
182 if (ret != ACT_P_CREATED) { 183 if (ret != ACT_P_CREATED) {
183 ipt_destroy_target(ipt->tcfi_t); 184 ipt_destroy_target(ipt->tcfi_t, net);
184 kfree(ipt->tcfi_tname); 185 kfree(ipt->tcfi_tname);
185 kfree(ipt->tcfi_t); 186 kfree(ipt->tcfi_t);
186 } 187 }
@@ -350,7 +351,7 @@ static __net_init int ipt_init_net(struct net *net)
350{ 351{
351 struct tc_action_net *tn = net_generic(net, ipt_net_id); 352 struct tc_action_net *tn = net_generic(net, ipt_net_id);
352 353
353 return tc_action_net_init(tn, &act_ipt_ops); 354 return tc_action_net_init(net, tn, &act_ipt_ops);
354} 355}
355 356
356static void __net_exit ipt_exit_net(struct list_head *net_list) 357static void __net_exit ipt_exit_net(struct list_head *net_list)
@@ -399,7 +400,7 @@ static __net_init int xt_init_net(struct net *net)
399{ 400{
400 struct tc_action_net *tn = net_generic(net, xt_net_id); 401 struct tc_action_net *tn = net_generic(net, xt_net_id);
401 402
402 return tc_action_net_init(tn, &act_xt_ops); 403 return tc_action_net_init(net, tn, &act_xt_ops);
403} 404}
404 405
405static void __net_exit xt_exit_net(struct list_head *net_list) 406static void __net_exit xt_exit_net(struct list_head *net_list)
diff --git a/net/sched/act_mirred.c b/net/sched/act_mirred.c
index 055faa298c8e..9d1bf508075a 100644
--- a/net/sched/act_mirred.c
+++ b/net/sched/act_mirred.c
@@ -104,6 +104,7 @@ static int tcf_mirred_init(struct net *net, struct nlattr *nla,
104 struct net_device *dev; 104 struct net_device *dev;
105 bool exists = false; 105 bool exists = false;
106 int ret, err; 106 int ret, err;
107 u32 index;
107 108
108 if (!nla) { 109 if (!nla) {
109 NL_SET_ERR_MSG_MOD(extack, "Mirred requires attributes to be passed"); 110 NL_SET_ERR_MSG_MOD(extack, "Mirred requires attributes to be passed");
@@ -118,8 +119,8 @@ static int tcf_mirred_init(struct net *net, struct nlattr *nla,
118 return -EINVAL; 119 return -EINVAL;
119 } 120 }
120 parm = nla_data(tb[TCA_MIRRED_PARMS]); 121 parm = nla_data(tb[TCA_MIRRED_PARMS]);
121 122 index = parm->index;
122 err = tcf_idr_check_alloc(tn, &parm->index, a, bind); 123 err = tcf_idr_check_alloc(tn, &index, a, bind);
123 if (err < 0) 124 if (err < 0)
124 return err; 125 return err;
125 exists = err; 126 exists = err;
@@ -136,21 +137,21 @@ static int tcf_mirred_init(struct net *net, struct nlattr *nla,
136 if (exists) 137 if (exists)
137 tcf_idr_release(*a, bind); 138 tcf_idr_release(*a, bind);
138 else 139 else
139 tcf_idr_cleanup(tn, parm->index); 140 tcf_idr_cleanup(tn, index);
140 NL_SET_ERR_MSG_MOD(extack, "Unknown mirred option"); 141 NL_SET_ERR_MSG_MOD(extack, "Unknown mirred option");
141 return -EINVAL; 142 return -EINVAL;
142 } 143 }
143 144
144 if (!exists) { 145 if (!exists) {
145 if (!parm->ifindex) { 146 if (!parm->ifindex) {
146 tcf_idr_cleanup(tn, parm->index); 147 tcf_idr_cleanup(tn, index);
147 NL_SET_ERR_MSG_MOD(extack, "Specified device does not exist"); 148 NL_SET_ERR_MSG_MOD(extack, "Specified device does not exist");
148 return -EINVAL; 149 return -EINVAL;
149 } 150 }
150 ret = tcf_idr_create(tn, parm->index, est, a, 151 ret = tcf_idr_create(tn, index, est, a,
151 &act_mirred_ops, bind, true); 152 &act_mirred_ops, bind, true);
152 if (ret) { 153 if (ret) {
153 tcf_idr_cleanup(tn, parm->index); 154 tcf_idr_cleanup(tn, index);
154 return ret; 155 return ret;
155 } 156 }
156 ret = ACT_P_CREATED; 157 ret = ACT_P_CREATED;
@@ -452,7 +453,7 @@ static __net_init int mirred_init_net(struct net *net)
452{ 453{
453 struct tc_action_net *tn = net_generic(net, mirred_net_id); 454 struct tc_action_net *tn = net_generic(net, mirred_net_id);
454 455
455 return tc_action_net_init(tn, &act_mirred_ops); 456 return tc_action_net_init(net, tn, &act_mirred_ops);
456} 457}
457 458
458static void __net_exit mirred_exit_net(struct list_head *net_list) 459static void __net_exit mirred_exit_net(struct list_head *net_list)
diff --git a/net/sched/act_mpls.c b/net/sched/act_mpls.c
index ca2597ce4ac9..e168df0e008a 100644
--- a/net/sched/act_mpls.c
+++ b/net/sched/act_mpls.c
@@ -138,6 +138,7 @@ static int tcf_mpls_init(struct net *net, struct nlattr *nla,
138 struct tcf_mpls *m; 138 struct tcf_mpls *m;
139 int ret = 0, err; 139 int ret = 0, err;
140 u8 mpls_ttl = 0; 140 u8 mpls_ttl = 0;
141 u32 index;
141 142
142 if (!nla) { 143 if (!nla) {
143 NL_SET_ERR_MSG_MOD(extack, "Missing netlink attributes"); 144 NL_SET_ERR_MSG_MOD(extack, "Missing netlink attributes");
@@ -153,6 +154,7 @@ static int tcf_mpls_init(struct net *net, struct nlattr *nla,
153 return -EINVAL; 154 return -EINVAL;
154 } 155 }
155 parm = nla_data(tb[TCA_MPLS_PARMS]); 156 parm = nla_data(tb[TCA_MPLS_PARMS]);
157 index = parm->index;
156 158
157 /* Verify parameters against action type. */ 159 /* Verify parameters against action type. */
158 switch (parm->m_action) { 160 switch (parm->m_action) {
@@ -209,7 +211,7 @@ static int tcf_mpls_init(struct net *net, struct nlattr *nla,
209 return -EINVAL; 211 return -EINVAL;
210 } 212 }
211 213
212 err = tcf_idr_check_alloc(tn, &parm->index, a, bind); 214 err = tcf_idr_check_alloc(tn, &index, a, bind);
213 if (err < 0) 215 if (err < 0)
214 return err; 216 return err;
215 exists = err; 217 exists = err;
@@ -217,10 +219,10 @@ static int tcf_mpls_init(struct net *net, struct nlattr *nla,
217 return 0; 219 return 0;
218 220
219 if (!exists) { 221 if (!exists) {
220 ret = tcf_idr_create(tn, parm->index, est, a, 222 ret = tcf_idr_create(tn, index, est, a,
221 &act_mpls_ops, bind, true); 223 &act_mpls_ops, bind, true);
222 if (ret) { 224 if (ret) {
223 tcf_idr_cleanup(tn, parm->index); 225 tcf_idr_cleanup(tn, index);
224 return ret; 226 return ret;
225 } 227 }
226 228
@@ -373,7 +375,7 @@ static __net_init int mpls_init_net(struct net *net)
373{ 375{
374 struct tc_action_net *tn = net_generic(net, mpls_net_id); 376 struct tc_action_net *tn = net_generic(net, mpls_net_id);
375 377
376 return tc_action_net_init(tn, &act_mpls_ops); 378 return tc_action_net_init(net, tn, &act_mpls_ops);
377} 379}
378 380
379static void __net_exit mpls_exit_net(struct list_head *net_list) 381static void __net_exit mpls_exit_net(struct list_head *net_list)
diff --git a/net/sched/act_nat.c b/net/sched/act_nat.c
index 45923ebb7a4f..ea4c5359e7df 100644
--- a/net/sched/act_nat.c
+++ b/net/sched/act_nat.c
@@ -44,6 +44,7 @@ static int tcf_nat_init(struct net *net, struct nlattr *nla, struct nlattr *est,
44 struct tc_nat *parm; 44 struct tc_nat *parm;
45 int ret = 0, err; 45 int ret = 0, err;
46 struct tcf_nat *p; 46 struct tcf_nat *p;
47 u32 index;
47 48
48 if (nla == NULL) 49 if (nla == NULL)
49 return -EINVAL; 50 return -EINVAL;
@@ -56,13 +57,13 @@ static int tcf_nat_init(struct net *net, struct nlattr *nla, struct nlattr *est,
56 if (tb[TCA_NAT_PARMS] == NULL) 57 if (tb[TCA_NAT_PARMS] == NULL)
57 return -EINVAL; 58 return -EINVAL;
58 parm = nla_data(tb[TCA_NAT_PARMS]); 59 parm = nla_data(tb[TCA_NAT_PARMS]);
59 60 index = parm->index;
60 err = tcf_idr_check_alloc(tn, &parm->index, a, bind); 61 err = tcf_idr_check_alloc(tn, &index, a, bind);
61 if (!err) { 62 if (!err) {
62 ret = tcf_idr_create(tn, parm->index, est, a, 63 ret = tcf_idr_create(tn, index, est, a,
63 &act_nat_ops, bind, false); 64 &act_nat_ops, bind, false);
64 if (ret) { 65 if (ret) {
65 tcf_idr_cleanup(tn, parm->index); 66 tcf_idr_cleanup(tn, index);
66 return ret; 67 return ret;
67 } 68 }
68 ret = ACT_P_CREATED; 69 ret = ACT_P_CREATED;
@@ -326,7 +327,7 @@ static __net_init int nat_init_net(struct net *net)
326{ 327{
327 struct tc_action_net *tn = net_generic(net, nat_net_id); 328 struct tc_action_net *tn = net_generic(net, nat_net_id);
328 329
329 return tc_action_net_init(tn, &act_nat_ops); 330 return tc_action_net_init(net, tn, &act_nat_ops);
330} 331}
331 332
332static void __net_exit nat_exit_net(struct list_head *net_list) 333static void __net_exit nat_exit_net(struct list_head *net_list)
diff --git a/net/sched/act_pedit.c b/net/sched/act_pedit.c
index 45e9d6bfddb3..cdfaa79382a2 100644
--- a/net/sched/act_pedit.c
+++ b/net/sched/act_pedit.c
@@ -149,6 +149,7 @@ static int tcf_pedit_init(struct net *net, struct nlattr *nla,
149 struct tcf_pedit *p; 149 struct tcf_pedit *p;
150 int ret = 0, err; 150 int ret = 0, err;
151 int ksize; 151 int ksize;
152 u32 index;
152 153
153 if (!nla) { 154 if (!nla) {
154 NL_SET_ERR_MSG_MOD(extack, "Pedit requires attributes to be passed"); 155 NL_SET_ERR_MSG_MOD(extack, "Pedit requires attributes to be passed");
@@ -179,18 +180,19 @@ static int tcf_pedit_init(struct net *net, struct nlattr *nla,
179 if (IS_ERR(keys_ex)) 180 if (IS_ERR(keys_ex))
180 return PTR_ERR(keys_ex); 181 return PTR_ERR(keys_ex);
181 182
182 err = tcf_idr_check_alloc(tn, &parm->index, a, bind); 183 index = parm->index;
184 err = tcf_idr_check_alloc(tn, &index, a, bind);
183 if (!err) { 185 if (!err) {
184 if (!parm->nkeys) { 186 if (!parm->nkeys) {
185 tcf_idr_cleanup(tn, parm->index); 187 tcf_idr_cleanup(tn, index);
186 NL_SET_ERR_MSG_MOD(extack, "Pedit requires keys to be passed"); 188 NL_SET_ERR_MSG_MOD(extack, "Pedit requires keys to be passed");
187 ret = -EINVAL; 189 ret = -EINVAL;
188 goto out_free; 190 goto out_free;
189 } 191 }
190 ret = tcf_idr_create(tn, parm->index, est, a, 192 ret = tcf_idr_create(tn, index, est, a,
191 &act_pedit_ops, bind, false); 193 &act_pedit_ops, bind, false);
192 if (ret) { 194 if (ret) {
193 tcf_idr_cleanup(tn, parm->index); 195 tcf_idr_cleanup(tn, index);
194 goto out_free; 196 goto out_free;
195 } 197 }
196 ret = ACT_P_CREATED; 198 ret = ACT_P_CREATED;
@@ -496,7 +498,7 @@ static __net_init int pedit_init_net(struct net *net)
496{ 498{
497 struct tc_action_net *tn = net_generic(net, pedit_net_id); 499 struct tc_action_net *tn = net_generic(net, pedit_net_id);
498 500
499 return tc_action_net_init(tn, &act_pedit_ops); 501 return tc_action_net_init(net, tn, &act_pedit_ops);
500} 502}
501 503
502static void __net_exit pedit_exit_net(struct list_head *net_list) 504static void __net_exit pedit_exit_net(struct list_head *net_list)
diff --git a/net/sched/act_police.c b/net/sched/act_police.c
index a065f62fa79c..6315e0f8d26e 100644
--- a/net/sched/act_police.c
+++ b/net/sched/act_police.c
@@ -57,6 +57,7 @@ static int tcf_police_init(struct net *net, struct nlattr *nla,
57 struct tc_action_net *tn = net_generic(net, police_net_id); 57 struct tc_action_net *tn = net_generic(net, police_net_id);
58 struct tcf_police_params *new; 58 struct tcf_police_params *new;
59 bool exists = false; 59 bool exists = false;
60 u32 index;
60 61
61 if (nla == NULL) 62 if (nla == NULL)
62 return -EINVAL; 63 return -EINVAL;
@@ -73,7 +74,8 @@ static int tcf_police_init(struct net *net, struct nlattr *nla,
73 return -EINVAL; 74 return -EINVAL;
74 75
75 parm = nla_data(tb[TCA_POLICE_TBF]); 76 parm = nla_data(tb[TCA_POLICE_TBF]);
76 err = tcf_idr_check_alloc(tn, &parm->index, a, bind); 77 index = parm->index;
78 err = tcf_idr_check_alloc(tn, &index, a, bind);
77 if (err < 0) 79 if (err < 0)
78 return err; 80 return err;
79 exists = err; 81 exists = err;
@@ -81,10 +83,10 @@ static int tcf_police_init(struct net *net, struct nlattr *nla,
81 return 0; 83 return 0;
82 84
83 if (!exists) { 85 if (!exists) {
84 ret = tcf_idr_create(tn, parm->index, NULL, a, 86 ret = tcf_idr_create(tn, index, NULL, a,
85 &act_police_ops, bind, true); 87 &act_police_ops, bind, true);
86 if (ret) { 88 if (ret) {
87 tcf_idr_cleanup(tn, parm->index); 89 tcf_idr_cleanup(tn, index);
88 return ret; 90 return ret;
89 } 91 }
90 ret = ACT_P_CREATED; 92 ret = ACT_P_CREATED;
@@ -369,7 +371,7 @@ static __net_init int police_init_net(struct net *net)
369{ 371{
370 struct tc_action_net *tn = net_generic(net, police_net_id); 372 struct tc_action_net *tn = net_generic(net, police_net_id);
371 373
372 return tc_action_net_init(tn, &act_police_ops); 374 return tc_action_net_init(net, tn, &act_police_ops);
373} 375}
374 376
375static void __net_exit police_exit_net(struct list_head *net_list) 377static void __net_exit police_exit_net(struct list_head *net_list)
diff --git a/net/sched/act_sample.c b/net/sched/act_sample.c
index 274d7a0c0e25..10229124a992 100644
--- a/net/sched/act_sample.c
+++ b/net/sched/act_sample.c
@@ -41,8 +41,8 @@ static int tcf_sample_init(struct net *net, struct nlattr *nla,
41 struct tc_action_net *tn = net_generic(net, sample_net_id); 41 struct tc_action_net *tn = net_generic(net, sample_net_id);
42 struct nlattr *tb[TCA_SAMPLE_MAX + 1]; 42 struct nlattr *tb[TCA_SAMPLE_MAX + 1];
43 struct psample_group *psample_group; 43 struct psample_group *psample_group;
44 u32 psample_group_num, rate, index;
44 struct tcf_chain *goto_ch = NULL; 45 struct tcf_chain *goto_ch = NULL;
45 u32 psample_group_num, rate;
46 struct tc_sample *parm; 46 struct tc_sample *parm;
47 struct tcf_sample *s; 47 struct tcf_sample *s;
48 bool exists = false; 48 bool exists = false;
@@ -59,8 +59,8 @@ static int tcf_sample_init(struct net *net, struct nlattr *nla,
59 return -EINVAL; 59 return -EINVAL;
60 60
61 parm = nla_data(tb[TCA_SAMPLE_PARMS]); 61 parm = nla_data(tb[TCA_SAMPLE_PARMS]);
62 62 index = parm->index;
63 err = tcf_idr_check_alloc(tn, &parm->index, a, bind); 63 err = tcf_idr_check_alloc(tn, &index, a, bind);
64 if (err < 0) 64 if (err < 0)
65 return err; 65 return err;
66 exists = err; 66 exists = err;
@@ -68,10 +68,10 @@ static int tcf_sample_init(struct net *net, struct nlattr *nla,
68 return 0; 68 return 0;
69 69
70 if (!exists) { 70 if (!exists) {
71 ret = tcf_idr_create(tn, parm->index, est, a, 71 ret = tcf_idr_create(tn, index, est, a,
72 &act_sample_ops, bind, true); 72 &act_sample_ops, bind, true);
73 if (ret) { 73 if (ret) {
74 tcf_idr_cleanup(tn, parm->index); 74 tcf_idr_cleanup(tn, index);
75 return ret; 75 return ret;
76 } 76 }
77 ret = ACT_P_CREATED; 77 ret = ACT_P_CREATED;
@@ -102,13 +102,17 @@ static int tcf_sample_init(struct net *net, struct nlattr *nla,
102 goto_ch = tcf_action_set_ctrlact(*a, parm->action, goto_ch); 102 goto_ch = tcf_action_set_ctrlact(*a, parm->action, goto_ch);
103 s->rate = rate; 103 s->rate = rate;
104 s->psample_group_num = psample_group_num; 104 s->psample_group_num = psample_group_num;
105 RCU_INIT_POINTER(s->psample_group, psample_group); 105 rcu_swap_protected(s->psample_group, psample_group,
106 lockdep_is_held(&s->tcf_lock));
106 107
107 if (tb[TCA_SAMPLE_TRUNC_SIZE]) { 108 if (tb[TCA_SAMPLE_TRUNC_SIZE]) {
108 s->truncate = true; 109 s->truncate = true;
109 s->trunc_size = nla_get_u32(tb[TCA_SAMPLE_TRUNC_SIZE]); 110 s->trunc_size = nla_get_u32(tb[TCA_SAMPLE_TRUNC_SIZE]);
110 } 111 }
111 spin_unlock_bh(&s->tcf_lock); 112 spin_unlock_bh(&s->tcf_lock);
113
114 if (psample_group)
115 psample_group_put(psample_group);
112 if (goto_ch) 116 if (goto_ch)
113 tcf_chain_put_by_act(goto_ch); 117 tcf_chain_put_by_act(goto_ch);
114 118
@@ -265,7 +269,7 @@ static __net_init int sample_init_net(struct net *net)
265{ 269{
266 struct tc_action_net *tn = net_generic(net, sample_net_id); 270 struct tc_action_net *tn = net_generic(net, sample_net_id);
267 271
268 return tc_action_net_init(tn, &act_sample_ops); 272 return tc_action_net_init(net, tn, &act_sample_ops);
269} 273}
270 274
271static void __net_exit sample_exit_net(struct list_head *net_list) 275static void __net_exit sample_exit_net(struct list_head *net_list)
diff --git a/net/sched/act_simple.c b/net/sched/act_simple.c
index f28ddbabff76..6120e56117ca 100644
--- a/net/sched/act_simple.c
+++ b/net/sched/act_simple.c
@@ -95,6 +95,7 @@ static int tcf_simp_init(struct net *net, struct nlattr *nla,
95 struct tcf_defact *d; 95 struct tcf_defact *d;
96 bool exists = false; 96 bool exists = false;
97 int ret = 0, err; 97 int ret = 0, err;
98 u32 index;
98 99
99 if (nla == NULL) 100 if (nla == NULL)
100 return -EINVAL; 101 return -EINVAL;
@@ -108,7 +109,8 @@ static int tcf_simp_init(struct net *net, struct nlattr *nla,
108 return -EINVAL; 109 return -EINVAL;
109 110
110 parm = nla_data(tb[TCA_DEF_PARMS]); 111 parm = nla_data(tb[TCA_DEF_PARMS]);
111 err = tcf_idr_check_alloc(tn, &parm->index, a, bind); 112 index = parm->index;
113 err = tcf_idr_check_alloc(tn, &index, a, bind);
112 if (err < 0) 114 if (err < 0)
113 return err; 115 return err;
114 exists = err; 116 exists = err;
@@ -119,15 +121,15 @@ static int tcf_simp_init(struct net *net, struct nlattr *nla,
119 if (exists) 121 if (exists)
120 tcf_idr_release(*a, bind); 122 tcf_idr_release(*a, bind);
121 else 123 else
122 tcf_idr_cleanup(tn, parm->index); 124 tcf_idr_cleanup(tn, index);
123 return -EINVAL; 125 return -EINVAL;
124 } 126 }
125 127
126 if (!exists) { 128 if (!exists) {
127 ret = tcf_idr_create(tn, parm->index, est, a, 129 ret = tcf_idr_create(tn, index, est, a,
128 &act_simp_ops, bind, false); 130 &act_simp_ops, bind, false);
129 if (ret) { 131 if (ret) {
130 tcf_idr_cleanup(tn, parm->index); 132 tcf_idr_cleanup(tn, index);
131 return ret; 133 return ret;
132 } 134 }
133 135
@@ -230,7 +232,7 @@ static __net_init int simp_init_net(struct net *net)
230{ 232{
231 struct tc_action_net *tn = net_generic(net, simp_net_id); 233 struct tc_action_net *tn = net_generic(net, simp_net_id);
232 234
233 return tc_action_net_init(tn, &act_simp_ops); 235 return tc_action_net_init(net, tn, &act_simp_ops);
234} 236}
235 237
236static void __net_exit simp_exit_net(struct list_head *net_list) 238static void __net_exit simp_exit_net(struct list_head *net_list)
diff --git a/net/sched/act_skbedit.c b/net/sched/act_skbedit.c
index 215a06705cef..6a8d3337c577 100644
--- a/net/sched/act_skbedit.c
+++ b/net/sched/act_skbedit.c
@@ -99,6 +99,7 @@ static int tcf_skbedit_init(struct net *net, struct nlattr *nla,
99 u16 *queue_mapping = NULL, *ptype = NULL; 99 u16 *queue_mapping = NULL, *ptype = NULL;
100 bool exists = false; 100 bool exists = false;
101 int ret = 0, err; 101 int ret = 0, err;
102 u32 index;
102 103
103 if (nla == NULL) 104 if (nla == NULL)
104 return -EINVAL; 105 return -EINVAL;
@@ -146,8 +147,8 @@ static int tcf_skbedit_init(struct net *net, struct nlattr *nla,
146 } 147 }
147 148
148 parm = nla_data(tb[TCA_SKBEDIT_PARMS]); 149 parm = nla_data(tb[TCA_SKBEDIT_PARMS]);
149 150 index = parm->index;
150 err = tcf_idr_check_alloc(tn, &parm->index, a, bind); 151 err = tcf_idr_check_alloc(tn, &index, a, bind);
151 if (err < 0) 152 if (err < 0)
152 return err; 153 return err;
153 exists = err; 154 exists = err;
@@ -158,15 +159,15 @@ static int tcf_skbedit_init(struct net *net, struct nlattr *nla,
158 if (exists) 159 if (exists)
159 tcf_idr_release(*a, bind); 160 tcf_idr_release(*a, bind);
160 else 161 else
161 tcf_idr_cleanup(tn, parm->index); 162 tcf_idr_cleanup(tn, index);
162 return -EINVAL; 163 return -EINVAL;
163 } 164 }
164 165
165 if (!exists) { 166 if (!exists) {
166 ret = tcf_idr_create(tn, parm->index, est, a, 167 ret = tcf_idr_create(tn, index, est, a,
167 &act_skbedit_ops, bind, true); 168 &act_skbedit_ops, bind, true);
168 if (ret) { 169 if (ret) {
169 tcf_idr_cleanup(tn, parm->index); 170 tcf_idr_cleanup(tn, index);
170 return ret; 171 return ret;
171 } 172 }
172 173
@@ -306,6 +307,17 @@ static int tcf_skbedit_search(struct net *net, struct tc_action **a, u32 index)
306 return tcf_idr_search(tn, a, index); 307 return tcf_idr_search(tn, a, index);
307} 308}
308 309
310static size_t tcf_skbedit_get_fill_size(const struct tc_action *act)
311{
312 return nla_total_size(sizeof(struct tc_skbedit))
313 + nla_total_size(sizeof(u32)) /* TCA_SKBEDIT_PRIORITY */
314 + nla_total_size(sizeof(u16)) /* TCA_SKBEDIT_QUEUE_MAPPING */
315 + nla_total_size(sizeof(u32)) /* TCA_SKBEDIT_MARK */
316 + nla_total_size(sizeof(u16)) /* TCA_SKBEDIT_PTYPE */
317 + nla_total_size(sizeof(u32)) /* TCA_SKBEDIT_MASK */
318 + nla_total_size_64bit(sizeof(u64)); /* TCA_SKBEDIT_FLAGS */
319}
320
309static struct tc_action_ops act_skbedit_ops = { 321static struct tc_action_ops act_skbedit_ops = {
310 .kind = "skbedit", 322 .kind = "skbedit",
311 .id = TCA_ID_SKBEDIT, 323 .id = TCA_ID_SKBEDIT,
@@ -315,6 +327,7 @@ static struct tc_action_ops act_skbedit_ops = {
315 .init = tcf_skbedit_init, 327 .init = tcf_skbedit_init,
316 .cleanup = tcf_skbedit_cleanup, 328 .cleanup = tcf_skbedit_cleanup,
317 .walk = tcf_skbedit_walker, 329 .walk = tcf_skbedit_walker,
330 .get_fill_size = tcf_skbedit_get_fill_size,
318 .lookup = tcf_skbedit_search, 331 .lookup = tcf_skbedit_search,
319 .size = sizeof(struct tcf_skbedit), 332 .size = sizeof(struct tcf_skbedit),
320}; 333};
@@ -323,7 +336,7 @@ static __net_init int skbedit_init_net(struct net *net)
323{ 336{
324 struct tc_action_net *tn = net_generic(net, skbedit_net_id); 337 struct tc_action_net *tn = net_generic(net, skbedit_net_id);
325 338
326 return tc_action_net_init(tn, &act_skbedit_ops); 339 return tc_action_net_init(net, tn, &act_skbedit_ops);
327} 340}
328 341
329static void __net_exit skbedit_exit_net(struct list_head *net_list) 342static void __net_exit skbedit_exit_net(struct list_head *net_list)
diff --git a/net/sched/act_skbmod.c b/net/sched/act_skbmod.c
index 4f07706eff07..888437f97ba6 100644
--- a/net/sched/act_skbmod.c
+++ b/net/sched/act_skbmod.c
@@ -87,12 +87,12 @@ static int tcf_skbmod_init(struct net *net, struct nlattr *nla,
87 struct tcf_skbmod_params *p, *p_old; 87 struct tcf_skbmod_params *p, *p_old;
88 struct tcf_chain *goto_ch = NULL; 88 struct tcf_chain *goto_ch = NULL;
89 struct tc_skbmod *parm; 89 struct tc_skbmod *parm;
90 u32 lflags = 0, index;
90 struct tcf_skbmod *d; 91 struct tcf_skbmod *d;
91 bool exists = false; 92 bool exists = false;
92 u8 *daddr = NULL; 93 u8 *daddr = NULL;
93 u8 *saddr = NULL; 94 u8 *saddr = NULL;
94 u16 eth_type = 0; 95 u16 eth_type = 0;
95 u32 lflags = 0;
96 int ret = 0, err; 96 int ret = 0, err;
97 97
98 if (!nla) 98 if (!nla)
@@ -122,10 +122,11 @@ static int tcf_skbmod_init(struct net *net, struct nlattr *nla,
122 } 122 }
123 123
124 parm = nla_data(tb[TCA_SKBMOD_PARMS]); 124 parm = nla_data(tb[TCA_SKBMOD_PARMS]);
125 index = parm->index;
125 if (parm->flags & SKBMOD_F_SWAPMAC) 126 if (parm->flags & SKBMOD_F_SWAPMAC)
126 lflags = SKBMOD_F_SWAPMAC; 127 lflags = SKBMOD_F_SWAPMAC;
127 128
128 err = tcf_idr_check_alloc(tn, &parm->index, a, bind); 129 err = tcf_idr_check_alloc(tn, &index, a, bind);
129 if (err < 0) 130 if (err < 0)
130 return err; 131 return err;
131 exists = err; 132 exists = err;
@@ -136,15 +137,15 @@ static int tcf_skbmod_init(struct net *net, struct nlattr *nla,
136 if (exists) 137 if (exists)
137 tcf_idr_release(*a, bind); 138 tcf_idr_release(*a, bind);
138 else 139 else
139 tcf_idr_cleanup(tn, parm->index); 140 tcf_idr_cleanup(tn, index);
140 return -EINVAL; 141 return -EINVAL;
141 } 142 }
142 143
143 if (!exists) { 144 if (!exists) {
144 ret = tcf_idr_create(tn, parm->index, est, a, 145 ret = tcf_idr_create(tn, index, est, a,
145 &act_skbmod_ops, bind, true); 146 &act_skbmod_ops, bind, true);
146 if (ret) { 147 if (ret) {
147 tcf_idr_cleanup(tn, parm->index); 148 tcf_idr_cleanup(tn, index);
148 return ret; 149 return ret;
149 } 150 }
150 151
@@ -286,7 +287,7 @@ static __net_init int skbmod_init_net(struct net *net)
286{ 287{
287 struct tc_action_net *tn = net_generic(net, skbmod_net_id); 288 struct tc_action_net *tn = net_generic(net, skbmod_net_id);
288 289
289 return tc_action_net_init(tn, &act_skbmod_ops); 290 return tc_action_net_init(net, tn, &act_skbmod_ops);
290} 291}
291 292
292static void __net_exit skbmod_exit_net(struct list_head *net_list) 293static void __net_exit skbmod_exit_net(struct list_head *net_list)
diff --git a/net/sched/act_tunnel_key.c b/net/sched/act_tunnel_key.c
index 10dffda1d5cc..2f83a79f76aa 100644
--- a/net/sched/act_tunnel_key.c
+++ b/net/sched/act_tunnel_key.c
@@ -225,6 +225,7 @@ static int tunnel_key_init(struct net *net, struct nlattr *nla,
225 __be16 flags = 0; 225 __be16 flags = 0;
226 u8 tos, ttl; 226 u8 tos, ttl;
227 int ret = 0; 227 int ret = 0;
228 u32 index;
228 int err; 229 int err;
229 230
230 if (!nla) { 231 if (!nla) {
@@ -245,7 +246,8 @@ static int tunnel_key_init(struct net *net, struct nlattr *nla,
245 } 246 }
246 247
247 parm = nla_data(tb[TCA_TUNNEL_KEY_PARMS]); 248 parm = nla_data(tb[TCA_TUNNEL_KEY_PARMS]);
248 err = tcf_idr_check_alloc(tn, &parm->index, a, bind); 249 index = parm->index;
250 err = tcf_idr_check_alloc(tn, &index, a, bind);
249 if (err < 0) 251 if (err < 0)
250 return err; 252 return err;
251 exists = err; 253 exists = err;
@@ -345,7 +347,7 @@ static int tunnel_key_init(struct net *net, struct nlattr *nla,
345 } 347 }
346 348
347 if (!exists) { 349 if (!exists) {
348 ret = tcf_idr_create(tn, parm->index, est, a, 350 ret = tcf_idr_create(tn, index, est, a,
349 &act_tunnel_key_ops, bind, true); 351 &act_tunnel_key_ops, bind, true);
350 if (ret) { 352 if (ret) {
351 NL_SET_ERR_MSG(extack, "Cannot create TC IDR"); 353 NL_SET_ERR_MSG(extack, "Cannot create TC IDR");
@@ -403,7 +405,7 @@ err_out:
403 if (exists) 405 if (exists)
404 tcf_idr_release(*a, bind); 406 tcf_idr_release(*a, bind);
405 else 407 else
406 tcf_idr_cleanup(tn, parm->index); 408 tcf_idr_cleanup(tn, index);
407 return ret; 409 return ret;
408} 410}
409 411
@@ -598,7 +600,7 @@ static __net_init int tunnel_key_init_net(struct net *net)
598{ 600{
599 struct tc_action_net *tn = net_generic(net, tunnel_key_net_id); 601 struct tc_action_net *tn = net_generic(net, tunnel_key_net_id);
600 602
601 return tc_action_net_init(tn, &act_tunnel_key_ops); 603 return tc_action_net_init(net, tn, &act_tunnel_key_ops);
602} 604}
603 605
604static void __net_exit tunnel_key_exit_net(struct list_head *net_list) 606static void __net_exit tunnel_key_exit_net(struct list_head *net_list)
diff --git a/net/sched/act_vlan.c b/net/sched/act_vlan.c
index 9269d350fb8a..287a30bf8930 100644
--- a/net/sched/act_vlan.c
+++ b/net/sched/act_vlan.c
@@ -116,6 +116,7 @@ static int tcf_vlan_init(struct net *net, struct nlattr *nla,
116 u8 push_prio = 0; 116 u8 push_prio = 0;
117 bool exists = false; 117 bool exists = false;
118 int ret = 0, err; 118 int ret = 0, err;
119 u32 index;
119 120
120 if (!nla) 121 if (!nla)
121 return -EINVAL; 122 return -EINVAL;
@@ -128,7 +129,8 @@ static int tcf_vlan_init(struct net *net, struct nlattr *nla,
128 if (!tb[TCA_VLAN_PARMS]) 129 if (!tb[TCA_VLAN_PARMS])
129 return -EINVAL; 130 return -EINVAL;
130 parm = nla_data(tb[TCA_VLAN_PARMS]); 131 parm = nla_data(tb[TCA_VLAN_PARMS]);
131 err = tcf_idr_check_alloc(tn, &parm->index, a, bind); 132 index = parm->index;
133 err = tcf_idr_check_alloc(tn, &index, a, bind);
132 if (err < 0) 134 if (err < 0)
133 return err; 135 return err;
134 exists = err; 136 exists = err;
@@ -144,7 +146,7 @@ static int tcf_vlan_init(struct net *net, struct nlattr *nla,
144 if (exists) 146 if (exists)
145 tcf_idr_release(*a, bind); 147 tcf_idr_release(*a, bind);
146 else 148 else
147 tcf_idr_cleanup(tn, parm->index); 149 tcf_idr_cleanup(tn, index);
148 return -EINVAL; 150 return -EINVAL;
149 } 151 }
150 push_vid = nla_get_u16(tb[TCA_VLAN_PUSH_VLAN_ID]); 152 push_vid = nla_get_u16(tb[TCA_VLAN_PUSH_VLAN_ID]);
@@ -152,7 +154,7 @@ static int tcf_vlan_init(struct net *net, struct nlattr *nla,
152 if (exists) 154 if (exists)
153 tcf_idr_release(*a, bind); 155 tcf_idr_release(*a, bind);
154 else 156 else
155 tcf_idr_cleanup(tn, parm->index); 157 tcf_idr_cleanup(tn, index);
156 return -ERANGE; 158 return -ERANGE;
157 } 159 }
158 160
@@ -166,7 +168,7 @@ static int tcf_vlan_init(struct net *net, struct nlattr *nla,
166 if (exists) 168 if (exists)
167 tcf_idr_release(*a, bind); 169 tcf_idr_release(*a, bind);
168 else 170 else
169 tcf_idr_cleanup(tn, parm->index); 171 tcf_idr_cleanup(tn, index);
170 return -EPROTONOSUPPORT; 172 return -EPROTONOSUPPORT;
171 } 173 }
172 } else { 174 } else {
@@ -180,16 +182,16 @@ static int tcf_vlan_init(struct net *net, struct nlattr *nla,
180 if (exists) 182 if (exists)
181 tcf_idr_release(*a, bind); 183 tcf_idr_release(*a, bind);
182 else 184 else
183 tcf_idr_cleanup(tn, parm->index); 185 tcf_idr_cleanup(tn, index);
184 return -EINVAL; 186 return -EINVAL;
185 } 187 }
186 action = parm->v_action; 188 action = parm->v_action;
187 189
188 if (!exists) { 190 if (!exists) {
189 ret = tcf_idr_create(tn, parm->index, est, a, 191 ret = tcf_idr_create(tn, index, est, a,
190 &act_vlan_ops, bind, true); 192 &act_vlan_ops, bind, true);
191 if (ret) { 193 if (ret) {
192 tcf_idr_cleanup(tn, parm->index); 194 tcf_idr_cleanup(tn, index);
193 return ret; 195 return ret;
194 } 196 }
195 197
@@ -306,6 +308,14 @@ static int tcf_vlan_search(struct net *net, struct tc_action **a, u32 index)
306 return tcf_idr_search(tn, a, index); 308 return tcf_idr_search(tn, a, index);
307} 309}
308 310
311static size_t tcf_vlan_get_fill_size(const struct tc_action *act)
312{
313 return nla_total_size(sizeof(struct tc_vlan))
314 + nla_total_size(sizeof(u16)) /* TCA_VLAN_PUSH_VLAN_ID */
315 + nla_total_size(sizeof(u16)) /* TCA_VLAN_PUSH_VLAN_PROTOCOL */
316 + nla_total_size(sizeof(u8)); /* TCA_VLAN_PUSH_VLAN_PRIORITY */
317}
318
309static struct tc_action_ops act_vlan_ops = { 319static struct tc_action_ops act_vlan_ops = {
310 .kind = "vlan", 320 .kind = "vlan",
311 .id = TCA_ID_VLAN, 321 .id = TCA_ID_VLAN,
@@ -315,6 +325,7 @@ static struct tc_action_ops act_vlan_ops = {
315 .init = tcf_vlan_init, 325 .init = tcf_vlan_init,
316 .cleanup = tcf_vlan_cleanup, 326 .cleanup = tcf_vlan_cleanup,
317 .walk = tcf_vlan_walker, 327 .walk = tcf_vlan_walker,
328 .get_fill_size = tcf_vlan_get_fill_size,
318 .lookup = tcf_vlan_search, 329 .lookup = tcf_vlan_search,
319 .size = sizeof(struct tcf_vlan), 330 .size = sizeof(struct tcf_vlan),
320}; 331};
@@ -323,7 +334,7 @@ static __net_init int vlan_init_net(struct net *net)
323{ 334{
324 struct tc_action_net *tn = net_generic(net, vlan_net_id); 335 struct tc_action_net *tn = net_generic(net, vlan_net_id);
325 336
326 return tc_action_net_init(tn, &act_vlan_ops); 337 return tc_action_net_init(net, tn, &act_vlan_ops);
327} 338}
328 339
329static void __net_exit vlan_exit_net(struct list_head *net_list) 340static void __net_exit vlan_exit_net(struct list_head *net_list)
diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c
index d144233423c5..efd3cfb80a2a 100644
--- a/net/sched/cls_api.c
+++ b/net/sched/cls_api.c
@@ -691,6 +691,8 @@ static void tc_indr_block_ing_cmd(struct tc_indr_block_dev *indr_dev,
691 if (!indr_dev->block) 691 if (!indr_dev->block)
692 return; 692 return;
693 693
694 bo.block = &indr_dev->block->flow_block;
695
694 indr_block_cb->cb(indr_dev->dev, indr_block_cb->cb_priv, TC_SETUP_BLOCK, 696 indr_block_cb->cb(indr_dev->dev, indr_block_cb->cb_priv, TC_SETUP_BLOCK,
695 &bo); 697 &bo);
696 tcf_block_setup(indr_dev->block, &bo); 698 tcf_block_setup(indr_dev->block, &bo);
@@ -775,6 +777,7 @@ static void tc_indr_block_call(struct tcf_block *block, struct net_device *dev,
775 .command = command, 777 .command = command,
776 .binder_type = ei->binder_type, 778 .binder_type = ei->binder_type,
777 .net = dev_net(dev), 779 .net = dev_net(dev),
780 .block = &block->flow_block,
778 .block_shared = tcf_block_shared(block), 781 .block_shared = tcf_block_shared(block),
779 .extack = extack, 782 .extack = extack,
780 }; 783 };
@@ -810,6 +813,7 @@ static int tcf_block_offload_cmd(struct tcf_block *block,
810 bo.net = dev_net(dev); 813 bo.net = dev_net(dev);
811 bo.command = command; 814 bo.command = command;
812 bo.binder_type = ei->binder_type; 815 bo.binder_type = ei->binder_type;
816 bo.block = &block->flow_block;
813 bo.block_shared = tcf_block_shared(block); 817 bo.block_shared = tcf_block_shared(block);
814 bo.extack = extack; 818 bo.extack = extack;
815 INIT_LIST_HEAD(&bo.cb_list); 819 INIT_LIST_HEAD(&bo.cb_list);
@@ -987,8 +991,8 @@ static struct tcf_block *tcf_block_create(struct net *net, struct Qdisc *q,
987 return ERR_PTR(-ENOMEM); 991 return ERR_PTR(-ENOMEM);
988 } 992 }
989 mutex_init(&block->lock); 993 mutex_init(&block->lock);
994 flow_block_init(&block->flow_block);
990 INIT_LIST_HEAD(&block->chain_list); 995 INIT_LIST_HEAD(&block->chain_list);
991 INIT_LIST_HEAD(&block->cb_list);
992 INIT_LIST_HEAD(&block->owner_list); 996 INIT_LIST_HEAD(&block->owner_list);
993 INIT_LIST_HEAD(&block->chain0.filter_chain_list); 997 INIT_LIST_HEAD(&block->chain0.filter_chain_list);
994 998
@@ -1514,7 +1518,7 @@ void tcf_block_put(struct tcf_block *block)
1514EXPORT_SYMBOL(tcf_block_put); 1518EXPORT_SYMBOL(tcf_block_put);
1515 1519
1516static int 1520static int
1517tcf_block_playback_offloads(struct tcf_block *block, tc_setup_cb_t *cb, 1521tcf_block_playback_offloads(struct tcf_block *block, flow_setup_cb_t *cb,
1518 void *cb_priv, bool add, bool offload_in_use, 1522 void *cb_priv, bool add, bool offload_in_use,
1519 struct netlink_ext_ack *extack) 1523 struct netlink_ext_ack *extack)
1520{ 1524{
@@ -1570,7 +1574,7 @@ static int tcf_block_bind(struct tcf_block *block,
1570 1574
1571 i++; 1575 i++;
1572 } 1576 }
1573 list_splice(&bo->cb_list, &block->cb_list); 1577 list_splice(&bo->cb_list, &block->flow_block.cb_list);
1574 1578
1575 return 0; 1579 return 0;
1576 1580
@@ -2152,7 +2156,9 @@ replay:
2152 tfilter_notify(net, skb, n, tp, block, q, parent, fh, 2156 tfilter_notify(net, skb, n, tp, block, q, parent, fh,
2153 RTM_NEWTFILTER, false, rtnl_held); 2157 RTM_NEWTFILTER, false, rtnl_held);
2154 tfilter_put(tp, fh); 2158 tfilter_put(tp, fh);
2155 q->flags &= ~TCQ_F_CAN_BYPASS; 2159 /* q pointer is NULL for shared blocks */
2160 if (q)
2161 q->flags &= ~TCQ_F_CAN_BYPASS;
2156 } 2162 }
2157 2163
2158errout: 2164errout:
@@ -3156,7 +3162,7 @@ int tc_setup_cb_call(struct tcf_block *block, enum tc_setup_type type,
3156 if (block->nooffloaddevcnt && err_stop) 3162 if (block->nooffloaddevcnt && err_stop)
3157 return -EOPNOTSUPP; 3163 return -EOPNOTSUPP;
3158 3164
3159 list_for_each_entry(block_cb, &block->cb_list, list) { 3165 list_for_each_entry(block_cb, &block->flow_block.cb_list, list) {
3160 err = block_cb->cb(type, type_data, block_cb->cb_priv); 3166 err = block_cb->cb(type, type_data, block_cb->cb_priv);
3161 if (err) { 3167 if (err) {
3162 if (err_stop) 3168 if (err_stop)
diff --git a/net/sched/cls_bpf.c b/net/sched/cls_bpf.c
index 691f71830134..3f7a9c02b70c 100644
--- a/net/sched/cls_bpf.c
+++ b/net/sched/cls_bpf.c
@@ -651,7 +651,7 @@ skip:
651 } 651 }
652} 652}
653 653
654static int cls_bpf_reoffload(struct tcf_proto *tp, bool add, tc_setup_cb_t *cb, 654static int cls_bpf_reoffload(struct tcf_proto *tp, bool add, flow_setup_cb_t *cb,
655 void *cb_priv, struct netlink_ext_ack *extack) 655 void *cb_priv, struct netlink_ext_ack *extack)
656{ 656{
657 struct cls_bpf_head *head = rtnl_dereference(tp->root); 657 struct cls_bpf_head *head = rtnl_dereference(tp->root);
diff --git a/net/sched/cls_flower.c b/net/sched/cls_flower.c
index 38d6e85693fc..054123742e32 100644
--- a/net/sched/cls_flower.c
+++ b/net/sched/cls_flower.c
@@ -1800,7 +1800,7 @@ fl_get_next_hw_filter(struct tcf_proto *tp, struct cls_fl_filter *f, bool add)
1800 return NULL; 1800 return NULL;
1801} 1801}
1802 1802
1803static int fl_reoffload(struct tcf_proto *tp, bool add, tc_setup_cb_t *cb, 1803static int fl_reoffload(struct tcf_proto *tp, bool add, flow_setup_cb_t *cb,
1804 void *cb_priv, struct netlink_ext_ack *extack) 1804 void *cb_priv, struct netlink_ext_ack *extack)
1805{ 1805{
1806 struct tcf_block *block = tp->chain->block; 1806 struct tcf_block *block = tp->chain->block;
diff --git a/net/sched/cls_matchall.c b/net/sched/cls_matchall.c
index a30d2f8feb32..455ea2793f9b 100644
--- a/net/sched/cls_matchall.c
+++ b/net/sched/cls_matchall.c
@@ -282,7 +282,7 @@ skip:
282 arg->count++; 282 arg->count++;
283} 283}
284 284
285static int mall_reoffload(struct tcf_proto *tp, bool add, tc_setup_cb_t *cb, 285static int mall_reoffload(struct tcf_proto *tp, bool add, flow_setup_cb_t *cb,
286 void *cb_priv, struct netlink_ext_ack *extack) 286 void *cb_priv, struct netlink_ext_ack *extack)
287{ 287{
288 struct cls_mall_head *head = rtnl_dereference(tp->root); 288 struct cls_mall_head *head = rtnl_dereference(tp->root);
diff --git a/net/sched/cls_u32.c b/net/sched/cls_u32.c
index be9e46c77e8b..8614088edd1b 100644
--- a/net/sched/cls_u32.c
+++ b/net/sched/cls_u32.c
@@ -1152,7 +1152,7 @@ static void u32_walk(struct tcf_proto *tp, struct tcf_walker *arg,
1152} 1152}
1153 1153
1154static int u32_reoffload_hnode(struct tcf_proto *tp, struct tc_u_hnode *ht, 1154static int u32_reoffload_hnode(struct tcf_proto *tp, struct tc_u_hnode *ht,
1155 bool add, tc_setup_cb_t *cb, void *cb_priv, 1155 bool add, flow_setup_cb_t *cb, void *cb_priv,
1156 struct netlink_ext_ack *extack) 1156 struct netlink_ext_ack *extack)
1157{ 1157{
1158 struct tc_cls_u32_offload cls_u32 = {}; 1158 struct tc_cls_u32_offload cls_u32 = {};
@@ -1172,7 +1172,7 @@ static int u32_reoffload_hnode(struct tcf_proto *tp, struct tc_u_hnode *ht,
1172} 1172}
1173 1173
1174static int u32_reoffload_knode(struct tcf_proto *tp, struct tc_u_knode *n, 1174static int u32_reoffload_knode(struct tcf_proto *tp, struct tc_u_knode *n,
1175 bool add, tc_setup_cb_t *cb, void *cb_priv, 1175 bool add, flow_setup_cb_t *cb, void *cb_priv,
1176 struct netlink_ext_ack *extack) 1176 struct netlink_ext_ack *extack)
1177{ 1177{
1178 struct tc_u_hnode *ht = rtnl_dereference(n->ht_down); 1178 struct tc_u_hnode *ht = rtnl_dereference(n->ht_down);
@@ -1213,7 +1213,7 @@ static int u32_reoffload_knode(struct tcf_proto *tp, struct tc_u_knode *n,
1213 return 0; 1213 return 0;
1214} 1214}
1215 1215
1216static int u32_reoffload(struct tcf_proto *tp, bool add, tc_setup_cb_t *cb, 1216static int u32_reoffload(struct tcf_proto *tp, bool add, flow_setup_cb_t *cb,
1217 void *cb_priv, struct netlink_ext_ack *extack) 1217 void *cb_priv, struct netlink_ext_ack *extack)
1218{ 1218{
1219 struct tc_u_common *tp_c = tp->data; 1219 struct tc_u_common *tp_c = tp->data;
diff --git a/net/sched/sch_cbs.c b/net/sched/sch_cbs.c
index 732e109c3055..810645b5c086 100644
--- a/net/sched/sch_cbs.c
+++ b/net/sched/sch_cbs.c
@@ -181,11 +181,6 @@ static struct sk_buff *cbs_dequeue_soft(struct Qdisc *sch)
181 s64 credits; 181 s64 credits;
182 int len; 182 int len;
183 183
184 if (atomic64_read(&q->port_rate) == -1) {
185 WARN_ONCE(1, "cbs: dequeue() called with unknown port rate.");
186 return NULL;
187 }
188
189 if (q->credits < 0) { 184 if (q->credits < 0) {
190 credits = timediff_to_credits(now - q->last, q->idleslope); 185 credits = timediff_to_credits(now - q->last, q->idleslope);
191 186
@@ -303,11 +298,19 @@ static int cbs_enable_offload(struct net_device *dev, struct cbs_sched_data *q,
303static void cbs_set_port_rate(struct net_device *dev, struct cbs_sched_data *q) 298static void cbs_set_port_rate(struct net_device *dev, struct cbs_sched_data *q)
304{ 299{
305 struct ethtool_link_ksettings ecmd; 300 struct ethtool_link_ksettings ecmd;
301 int speed = SPEED_10;
306 int port_rate = -1; 302 int port_rate = -1;
303 int err;
304
305 err = __ethtool_get_link_ksettings(dev, &ecmd);
306 if (err < 0)
307 goto skip;
308
309 if (ecmd.base.speed != SPEED_UNKNOWN)
310 speed = ecmd.base.speed;
307 311
308 if (!__ethtool_get_link_ksettings(dev, &ecmd) && 312skip:
309 ecmd.base.speed != SPEED_UNKNOWN) 313 port_rate = speed * 1000 * BYTES_PER_KBIT;
310 port_rate = ecmd.base.speed * 1000 * BYTES_PER_KBIT;
311 314
312 atomic64_set(&q->port_rate, port_rate); 315 atomic64_set(&q->port_rate, port_rate);
313 netdev_dbg(dev, "cbs: set %s's port_rate to: %lld, linkspeed: %d\n", 316 netdev_dbg(dev, "cbs: set %s's port_rate to: %lld, linkspeed: %d\n",
diff --git a/net/sched/sch_codel.c b/net/sched/sch_codel.c
index 25ef172c23df..30169b3adbbb 100644
--- a/net/sched/sch_codel.c
+++ b/net/sched/sch_codel.c
@@ -71,10 +71,10 @@ static struct sk_buff *dequeue_func(struct codel_vars *vars, void *ctx)
71 struct Qdisc *sch = ctx; 71 struct Qdisc *sch = ctx;
72 struct sk_buff *skb = __qdisc_dequeue_head(&sch->q); 72 struct sk_buff *skb = __qdisc_dequeue_head(&sch->q);
73 73
74 if (skb) 74 if (skb) {
75 sch->qstats.backlog -= qdisc_pkt_len(skb); 75 sch->qstats.backlog -= qdisc_pkt_len(skb);
76 76 prefetch(&skb->end); /* we'll need skb_shinfo() */
77 prefetch(&skb->end); /* we'll need skb_shinfo() */ 77 }
78 return skb; 78 return skb;
79} 79}
80 80
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index 11c03cf4aa74..137db1cbde85 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -624,8 +624,12 @@ static int pfifo_fast_enqueue(struct sk_buff *skb, struct Qdisc *qdisc,
624 624
625 err = skb_array_produce(q, skb); 625 err = skb_array_produce(q, skb);
626 626
627 if (unlikely(err)) 627 if (unlikely(err)) {
628 return qdisc_drop_cpu(skb, qdisc, to_free); 628 if (qdisc_is_percpu_stats(qdisc))
629 return qdisc_drop_cpu(skb, qdisc, to_free);
630 else
631 return qdisc_drop(skb, qdisc, to_free);
632 }
629 633
630 qdisc_update_stats_at_enqueue(qdisc, pkt_len); 634 qdisc_update_stats_at_enqueue(qdisc, pkt_len);
631 return NET_XMIT_SUCCESS; 635 return NET_XMIT_SUCCESS;
@@ -688,11 +692,14 @@ static void pfifo_fast_reset(struct Qdisc *qdisc)
688 kfree_skb(skb); 692 kfree_skb(skb);
689 } 693 }
690 694
691 for_each_possible_cpu(i) { 695 if (qdisc_is_percpu_stats(qdisc)) {
692 struct gnet_stats_queue *q = per_cpu_ptr(qdisc->cpu_qstats, i); 696 for_each_possible_cpu(i) {
697 struct gnet_stats_queue *q;
693 698
694 q->backlog = 0; 699 q = per_cpu_ptr(qdisc->cpu_qstats, i);
695 q->qlen = 0; 700 q->backlog = 0;
701 q->qlen = 0;
702 }
696 } 703 }
697} 704}
698 705
diff --git a/net/sched/sch_taprio.c b/net/sched/sch_taprio.c
index c39db507ba3f..8d8bc2ec5cd6 100644
--- a/net/sched/sch_taprio.c
+++ b/net/sched/sch_taprio.c
@@ -477,11 +477,6 @@ static struct sk_buff *taprio_dequeue(struct Qdisc *sch)
477 u32 gate_mask; 477 u32 gate_mask;
478 int i; 478 int i;
479 479
480 if (atomic64_read(&q->picos_per_byte) == -1) {
481 WARN_ONCE(1, "taprio: dequeue() called with unknown picos per byte.");
482 return NULL;
483 }
484
485 rcu_read_lock(); 480 rcu_read_lock();
486 entry = rcu_dereference(q->current_entry); 481 entry = rcu_dereference(q->current_entry);
487 /* if there's no entry, it means that the schedule didn't 482 /* if there's no entry, it means that the schedule didn't
@@ -958,12 +953,20 @@ static void taprio_set_picos_per_byte(struct net_device *dev,
958 struct taprio_sched *q) 953 struct taprio_sched *q)
959{ 954{
960 struct ethtool_link_ksettings ecmd; 955 struct ethtool_link_ksettings ecmd;
961 int picos_per_byte = -1; 956 int speed = SPEED_10;
957 int picos_per_byte;
958 int err;
962 959
963 if (!__ethtool_get_link_ksettings(dev, &ecmd) && 960 err = __ethtool_get_link_ksettings(dev, &ecmd);
964 ecmd.base.speed != SPEED_UNKNOWN) 961 if (err < 0)
965 picos_per_byte = div64_s64(NSEC_PER_SEC * 1000LL * 8, 962 goto skip;
966 ecmd.base.speed * 1000 * 1000); 963
964 if (ecmd.base.speed != SPEED_UNKNOWN)
965 speed = ecmd.base.speed;
966
967skip:
968 picos_per_byte = div64_s64(NSEC_PER_SEC * 1000LL * 8,
969 speed * 1000 * 1000);
967 970
968 atomic64_set(&q->picos_per_byte, picos_per_byte); 971 atomic64_set(&q->picos_per_byte, picos_per_byte);
969 netdev_dbg(dev, "taprio: set %s's picos_per_byte to: %lld, linkspeed: %d\n", 972 netdev_dbg(dev, "taprio: set %s's picos_per_byte to: %lld, linkspeed: %d\n",
@@ -1195,7 +1198,8 @@ unlock:
1195 spin_unlock_bh(qdisc_lock(sch)); 1198 spin_unlock_bh(qdisc_lock(sch));
1196 1199
1197free_sched: 1200free_sched:
1198 kfree(new_admin); 1201 if (new_admin)
1202 call_rcu(&new_admin->rcu, taprio_free_sched_cb);
1199 1203
1200 return err; 1204 return err;
1201} 1205}
@@ -1248,6 +1252,10 @@ static int taprio_init(struct Qdisc *sch, struct nlattr *opt,
1248 */ 1252 */
1249 q->clockid = -1; 1253 q->clockid = -1;
1250 1254
1255 spin_lock(&taprio_list_lock);
1256 list_add(&q->taprio_list, &taprio_list);
1257 spin_unlock(&taprio_list_lock);
1258
1251 if (sch->parent != TC_H_ROOT) 1259 if (sch->parent != TC_H_ROOT)
1252 return -EOPNOTSUPP; 1260 return -EOPNOTSUPP;
1253 1261
@@ -1265,10 +1273,6 @@ static int taprio_init(struct Qdisc *sch, struct nlattr *opt,
1265 if (!opt) 1273 if (!opt)
1266 return -EINVAL; 1274 return -EINVAL;
1267 1275
1268 spin_lock(&taprio_list_lock);
1269 list_add(&q->taprio_list, &taprio_list);
1270 spin_unlock(&taprio_list_lock);
1271
1272 for (i = 0; i < dev->num_tx_queues; i++) { 1276 for (i = 0; i < dev->num_tx_queues; i++) {
1273 struct netdev_queue *dev_queue; 1277 struct netdev_queue *dev_queue;
1274 struct Qdisc *qdisc; 1278 struct Qdisc *qdisc;
diff --git a/net/sctp/sm_sideeffect.c b/net/sctp/sm_sideeffect.c
index a554d6d15d1b..1cf5bb5b73c4 100644
--- a/net/sctp/sm_sideeffect.c
+++ b/net/sctp/sm_sideeffect.c
@@ -546,7 +546,7 @@ static void sctp_do_8_2_transport_strike(struct sctp_cmd_seq *commands,
546 */ 546 */
547 if (net->sctp.pf_enable && 547 if (net->sctp.pf_enable &&
548 (transport->state == SCTP_ACTIVE) && 548 (transport->state == SCTP_ACTIVE) &&
549 (asoc->pf_retrans < transport->pathmaxrxt) && 549 (transport->error_count < transport->pathmaxrxt) &&
550 (transport->error_count > asoc->pf_retrans)) { 550 (transport->error_count > asoc->pf_retrans)) {
551 551
552 sctp_assoc_control_transport(asoc, transport, 552 sctp_assoc_control_transport(asoc, transport,
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index aa80cda36581..9d1f83b10c0a 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -985,7 +985,7 @@ static int sctp_setsockopt_bindx(struct sock *sk,
985 return -EINVAL; 985 return -EINVAL;
986 986
987 kaddrs = memdup_user(addrs, addrs_size); 987 kaddrs = memdup_user(addrs, addrs_size);
988 if (unlikely(IS_ERR(kaddrs))) 988 if (IS_ERR(kaddrs))
989 return PTR_ERR(kaddrs); 989 return PTR_ERR(kaddrs);
990 990
991 /* Walk through the addrs buffer and count the number of addresses. */ 991 /* Walk through the addrs buffer and count the number of addresses. */
@@ -1315,7 +1315,7 @@ static int __sctp_setsockopt_connectx(struct sock *sk,
1315 return -EINVAL; 1315 return -EINVAL;
1316 1316
1317 kaddrs = memdup_user(addrs, addrs_size); 1317 kaddrs = memdup_user(addrs, addrs_size);
1318 if (unlikely(IS_ERR(kaddrs))) 1318 if (IS_ERR(kaddrs))
1319 return PTR_ERR(kaddrs); 1319 return PTR_ERR(kaddrs);
1320 1320
1321 /* Allow security module to validate connectx addresses. */ 1321 /* Allow security module to validate connectx addresses. */
diff --git a/net/sctp/stream.c b/net/sctp/stream.c
index 25946604af85..e83cdaa2ab76 100644
--- a/net/sctp/stream.c
+++ b/net/sctp/stream.c
@@ -316,6 +316,7 @@ int sctp_send_reset_streams(struct sctp_association *asoc,
316 nstr_list[i] = htons(str_list[i]); 316 nstr_list[i] = htons(str_list[i]);
317 317
318 if (out && !sctp_stream_outq_is_empty(stream, str_nums, nstr_list)) { 318 if (out && !sctp_stream_outq_is_empty(stream, str_nums, nstr_list)) {
319 kfree(nstr_list);
319 retval = -EAGAIN; 320 retval = -EAGAIN;
320 goto out; 321 goto out;
321 } 322 }
diff --git a/net/smc/af_smc.c b/net/smc/af_smc.c
index 302e355f2ebc..5b932583e407 100644
--- a/net/smc/af_smc.c
+++ b/net/smc/af_smc.c
@@ -263,7 +263,7 @@ static int smc_bind(struct socket *sock, struct sockaddr *uaddr,
263 263
264 /* Check if socket is already active */ 264 /* Check if socket is already active */
265 rc = -EINVAL; 265 rc = -EINVAL;
266 if (sk->sk_state != SMC_INIT) 266 if (sk->sk_state != SMC_INIT || smc->connect_nonblock)
267 goto out_rel; 267 goto out_rel;
268 268
269 smc->clcsock->sk->sk_reuse = sk->sk_reuse; 269 smc->clcsock->sk->sk_reuse = sk->sk_reuse;
@@ -1390,7 +1390,8 @@ static int smc_listen(struct socket *sock, int backlog)
1390 lock_sock(sk); 1390 lock_sock(sk);
1391 1391
1392 rc = -EINVAL; 1392 rc = -EINVAL;
1393 if ((sk->sk_state != SMC_INIT) && (sk->sk_state != SMC_LISTEN)) 1393 if ((sk->sk_state != SMC_INIT && sk->sk_state != SMC_LISTEN) ||
1394 smc->connect_nonblock)
1394 goto out; 1395 goto out;
1395 1396
1396 rc = 0; 1397 rc = 0;
@@ -1518,7 +1519,7 @@ static int smc_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
1518 goto out; 1519 goto out;
1519 1520
1520 if (msg->msg_flags & MSG_FASTOPEN) { 1521 if (msg->msg_flags & MSG_FASTOPEN) {
1521 if (sk->sk_state == SMC_INIT) { 1522 if (sk->sk_state == SMC_INIT && !smc->connect_nonblock) {
1522 smc_switch_to_fallback(smc); 1523 smc_switch_to_fallback(smc);
1523 smc->fallback_rsn = SMC_CLC_DECL_OPTUNSUPP; 1524 smc->fallback_rsn = SMC_CLC_DECL_OPTUNSUPP;
1524 } else { 1525 } else {
@@ -1732,14 +1733,18 @@ static int smc_setsockopt(struct socket *sock, int level, int optname,
1732 } 1733 }
1733 break; 1734 break;
1734 case TCP_NODELAY: 1735 case TCP_NODELAY:
1735 if (sk->sk_state != SMC_INIT && sk->sk_state != SMC_LISTEN) { 1736 if (sk->sk_state != SMC_INIT &&
1737 sk->sk_state != SMC_LISTEN &&
1738 sk->sk_state != SMC_CLOSED) {
1736 if (val && !smc->use_fallback) 1739 if (val && !smc->use_fallback)
1737 mod_delayed_work(system_wq, &smc->conn.tx_work, 1740 mod_delayed_work(system_wq, &smc->conn.tx_work,
1738 0); 1741 0);
1739 } 1742 }
1740 break; 1743 break;
1741 case TCP_CORK: 1744 case TCP_CORK:
1742 if (sk->sk_state != SMC_INIT && sk->sk_state != SMC_LISTEN) { 1745 if (sk->sk_state != SMC_INIT &&
1746 sk->sk_state != SMC_LISTEN &&
1747 sk->sk_state != SMC_CLOSED) {
1743 if (!val && !smc->use_fallback) 1748 if (!val && !smc->use_fallback)
1744 mod_delayed_work(system_wq, &smc->conn.tx_work, 1749 mod_delayed_work(system_wq, &smc->conn.tx_work,
1745 0); 1750 0);
diff --git a/net/smc/smc_tx.c b/net/smc/smc_tx.c
index f0de323d15d6..6c8f09c1ce51 100644
--- a/net/smc/smc_tx.c
+++ b/net/smc/smc_tx.c
@@ -76,13 +76,11 @@ static int smc_tx_wait(struct smc_sock *smc, int flags)
76 DEFINE_WAIT_FUNC(wait, woken_wake_function); 76 DEFINE_WAIT_FUNC(wait, woken_wake_function);
77 struct smc_connection *conn = &smc->conn; 77 struct smc_connection *conn = &smc->conn;
78 struct sock *sk = &smc->sk; 78 struct sock *sk = &smc->sk;
79 bool noblock;
80 long timeo; 79 long timeo;
81 int rc = 0; 80 int rc = 0;
82 81
83 /* similar to sk_stream_wait_memory */ 82 /* similar to sk_stream_wait_memory */
84 timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT); 83 timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);
85 noblock = timeo ? false : true;
86 add_wait_queue(sk_sleep(sk), &wait); 84 add_wait_queue(sk_sleep(sk), &wait);
87 while (1) { 85 while (1) {
88 sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk); 86 sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk);
@@ -97,8 +95,8 @@ static int smc_tx_wait(struct smc_sock *smc, int flags)
97 break; 95 break;
98 } 96 }
99 if (!timeo) { 97 if (!timeo) {
100 if (noblock) 98 /* ensure EPOLLOUT is subsequently generated */
101 set_bit(SOCK_NOSPACE, &sk->sk_socket->flags); 99 set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
102 rc = -EAGAIN; 100 rc = -EAGAIN;
103 break; 101 break;
104 } 102 }
diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c
index d8679b6027e9..a07b516e503a 100644
--- a/net/sunrpc/clnt.c
+++ b/net/sunrpc/clnt.c
@@ -1970,6 +1970,7 @@ call_bind(struct rpc_task *task)
1970static void 1970static void
1971call_bind_status(struct rpc_task *task) 1971call_bind_status(struct rpc_task *task)
1972{ 1972{
1973 struct rpc_xprt *xprt = task->tk_rqstp->rq_xprt;
1973 int status = -EIO; 1974 int status = -EIO;
1974 1975
1975 if (rpc_task_transmitted(task)) { 1976 if (rpc_task_transmitted(task)) {
@@ -1977,14 +1978,15 @@ call_bind_status(struct rpc_task *task)
1977 return; 1978 return;
1978 } 1979 }
1979 1980
1980 if (task->tk_status >= 0) { 1981 dprint_status(task);
1981 dprint_status(task); 1982 trace_rpc_bind_status(task);
1983 if (task->tk_status >= 0)
1984 goto out_next;
1985 if (xprt_bound(xprt)) {
1982 task->tk_status = 0; 1986 task->tk_status = 0;
1983 task->tk_action = call_connect; 1987 goto out_next;
1984 return;
1985 } 1988 }
1986 1989
1987 trace_rpc_bind_status(task);
1988 switch (task->tk_status) { 1990 switch (task->tk_status) {
1989 case -ENOMEM: 1991 case -ENOMEM:
1990 dprintk("RPC: %5u rpcbind out of memory\n", task->tk_pid); 1992 dprintk("RPC: %5u rpcbind out of memory\n", task->tk_pid);
@@ -2003,6 +2005,9 @@ call_bind_status(struct rpc_task *task)
2003 task->tk_rebind_retry--; 2005 task->tk_rebind_retry--;
2004 rpc_delay(task, 3*HZ); 2006 rpc_delay(task, 3*HZ);
2005 goto retry_timeout; 2007 goto retry_timeout;
2008 case -ENOBUFS:
2009 rpc_delay(task, HZ >> 2);
2010 goto retry_timeout;
2006 case -EAGAIN: 2011 case -EAGAIN:
2007 goto retry_timeout; 2012 goto retry_timeout;
2008 case -ETIMEDOUT: 2013 case -ETIMEDOUT:
@@ -2026,7 +2031,6 @@ call_bind_status(struct rpc_task *task)
2026 case -ENETDOWN: 2031 case -ENETDOWN:
2027 case -EHOSTUNREACH: 2032 case -EHOSTUNREACH:
2028 case -ENETUNREACH: 2033 case -ENETUNREACH:
2029 case -ENOBUFS:
2030 case -EPIPE: 2034 case -EPIPE:
2031 dprintk("RPC: %5u remote rpcbind unreachable: %d\n", 2035 dprintk("RPC: %5u remote rpcbind unreachable: %d\n",
2032 task->tk_pid, task->tk_status); 2036 task->tk_pid, task->tk_status);
@@ -2043,7 +2047,9 @@ call_bind_status(struct rpc_task *task)
2043 2047
2044 rpc_call_rpcerror(task, status); 2048 rpc_call_rpcerror(task, status);
2045 return; 2049 return;
2046 2050out_next:
2051 task->tk_action = call_connect;
2052 return;
2047retry_timeout: 2053retry_timeout:
2048 task->tk_status = 0; 2054 task->tk_status = 0;
2049 task->tk_action = call_bind; 2055 task->tk_action = call_bind;
@@ -2090,6 +2096,7 @@ call_connect(struct rpc_task *task)
2090static void 2096static void
2091call_connect_status(struct rpc_task *task) 2097call_connect_status(struct rpc_task *task)
2092{ 2098{
2099 struct rpc_xprt *xprt = task->tk_rqstp->rq_xprt;
2093 struct rpc_clnt *clnt = task->tk_client; 2100 struct rpc_clnt *clnt = task->tk_client;
2094 int status = task->tk_status; 2101 int status = task->tk_status;
2095 2102
@@ -2099,8 +2106,17 @@ call_connect_status(struct rpc_task *task)
2099 } 2106 }
2100 2107
2101 dprint_status(task); 2108 dprint_status(task);
2102
2103 trace_rpc_connect_status(task); 2109 trace_rpc_connect_status(task);
2110
2111 if (task->tk_status == 0) {
2112 clnt->cl_stats->netreconn++;
2113 goto out_next;
2114 }
2115 if (xprt_connected(xprt)) {
2116 task->tk_status = 0;
2117 goto out_next;
2118 }
2119
2104 task->tk_status = 0; 2120 task->tk_status = 0;
2105 switch (status) { 2121 switch (status) {
2106 case -ECONNREFUSED: 2122 case -ECONNREFUSED:
@@ -2117,8 +2133,6 @@ call_connect_status(struct rpc_task *task)
2117 case -ENETDOWN: 2133 case -ENETDOWN:
2118 case -ENETUNREACH: 2134 case -ENETUNREACH:
2119 case -EHOSTUNREACH: 2135 case -EHOSTUNREACH:
2120 case -EADDRINUSE:
2121 case -ENOBUFS:
2122 case -EPIPE: 2136 case -EPIPE:
2123 xprt_conditional_disconnect(task->tk_rqstp->rq_xprt, 2137 xprt_conditional_disconnect(task->tk_rqstp->rq_xprt,
2124 task->tk_rqstp->rq_connect_cookie); 2138 task->tk_rqstp->rq_connect_cookie);
@@ -2127,17 +2141,20 @@ call_connect_status(struct rpc_task *task)
2127 /* retry with existing socket, after a delay */ 2141 /* retry with existing socket, after a delay */
2128 rpc_delay(task, 3*HZ); 2142 rpc_delay(task, 3*HZ);
2129 /* fall through */ 2143 /* fall through */
2144 case -EADDRINUSE:
2130 case -ENOTCONN: 2145 case -ENOTCONN:
2131 case -EAGAIN: 2146 case -EAGAIN:
2132 case -ETIMEDOUT: 2147 case -ETIMEDOUT:
2133 goto out_retry; 2148 goto out_retry;
2134 case 0: 2149 case -ENOBUFS:
2135 clnt->cl_stats->netreconn++; 2150 rpc_delay(task, HZ >> 2);
2136 task->tk_action = call_transmit; 2151 goto out_retry;
2137 return;
2138 } 2152 }
2139 rpc_call_rpcerror(task, status); 2153 rpc_call_rpcerror(task, status);
2140 return; 2154 return;
2155out_next:
2156 task->tk_action = call_transmit;
2157 return;
2141out_retry: 2158out_retry:
2142 /* Check for timeouts before looping back to call_bind */ 2159 /* Check for timeouts before looping back to call_bind */
2143 task->tk_action = call_bind; 2160 task->tk_action = call_bind;
@@ -2365,7 +2382,7 @@ call_status(struct rpc_task *task)
2365 case -ECONNABORTED: 2382 case -ECONNABORTED:
2366 case -ENOTCONN: 2383 case -ENOTCONN:
2367 rpc_force_rebind(clnt); 2384 rpc_force_rebind(clnt);
2368 /* fall through */ 2385 break;
2369 case -EADDRINUSE: 2386 case -EADDRINUSE:
2370 rpc_delay(task, 3*HZ); 2387 rpc_delay(task, 3*HZ);
2371 /* fall through */ 2388 /* fall through */
diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c
index 783748dc5e6f..2e71f5455c6c 100644
--- a/net/sunrpc/xprt.c
+++ b/net/sunrpc/xprt.c
@@ -1408,13 +1408,6 @@ xprt_request_transmit(struct rpc_rqst *req, struct rpc_task *snd_task)
1408 status = -EBADMSG; 1408 status = -EBADMSG;
1409 goto out_dequeue; 1409 goto out_dequeue;
1410 } 1410 }
1411 if (task->tk_ops->rpc_call_prepare_transmit) {
1412 task->tk_ops->rpc_call_prepare_transmit(task,
1413 task->tk_calldata);
1414 status = task->tk_status;
1415 if (status < 0)
1416 goto out_dequeue;
1417 }
1418 if (RPC_SIGNALLED(task)) { 1411 if (RPC_SIGNALLED(task)) {
1419 status = -ERESTARTSYS; 1412 status = -ERESTARTSYS;
1420 goto out_dequeue; 1413 goto out_dequeue;
diff --git a/net/tipc/addr.c b/net/tipc/addr.c
index b88d48d00913..0f1eaed1bd1b 100644
--- a/net/tipc/addr.c
+++ b/net/tipc/addr.c
@@ -75,6 +75,7 @@ void tipc_set_node_addr(struct net *net, u32 addr)
75 tipc_set_node_id(net, node_id); 75 tipc_set_node_id(net, node_id);
76 } 76 }
77 tn->trial_addr = addr; 77 tn->trial_addr = addr;
78 tn->addr_trial_end = jiffies;
78 pr_info("32-bit node address hash set to %x\n", addr); 79 pr_info("32-bit node address hash set to %x\n", addr);
79} 80}
80 81
diff --git a/net/tipc/link.c b/net/tipc/link.c
index 66d3a07bc571..c2c5c53cad22 100644
--- a/net/tipc/link.c
+++ b/net/tipc/link.c
@@ -106,8 +106,6 @@ struct tipc_stats {
106 * @transmitq: queue for sent, non-acked messages 106 * @transmitq: queue for sent, non-acked messages
107 * @backlogq: queue for messages waiting to be sent 107 * @backlogq: queue for messages waiting to be sent
108 * @snt_nxt: next sequence number to use for outbound messages 108 * @snt_nxt: next sequence number to use for outbound messages
109 * @prev_from: sequence number of most previous retransmission request
110 * @stale_limit: time when repeated identical retransmits must force link reset
111 * @ackers: # of peers that needs to ack each packet before it can be released 109 * @ackers: # of peers that needs to ack each packet before it can be released
112 * @acked: # last packet acked by a certain peer. Used for broadcast. 110 * @acked: # last packet acked by a certain peer. Used for broadcast.
113 * @rcv_nxt: next sequence number to expect for inbound messages 111 * @rcv_nxt: next sequence number to expect for inbound messages
@@ -164,9 +162,7 @@ struct tipc_link {
164 u16 limit; 162 u16 limit;
165 } backlog[5]; 163 } backlog[5];
166 u16 snd_nxt; 164 u16 snd_nxt;
167 u16 prev_from;
168 u16 window; 165 u16 window;
169 unsigned long stale_limit;
170 166
171 /* Reception */ 167 /* Reception */
172 u16 rcv_nxt; 168 u16 rcv_nxt;
@@ -1044,47 +1040,53 @@ static void tipc_link_advance_backlog(struct tipc_link *l,
1044 * link_retransmit_failure() - Detect repeated retransmit failures 1040 * link_retransmit_failure() - Detect repeated retransmit failures
1045 * @l: tipc link sender 1041 * @l: tipc link sender
1046 * @r: tipc link receiver (= l in case of unicast) 1042 * @r: tipc link receiver (= l in case of unicast)
1047 * @from: seqno of the 1st packet in retransmit request
1048 * @rc: returned code 1043 * @rc: returned code
1049 * 1044 *
1050 * Return: true if the repeated retransmit failures happens, otherwise 1045 * Return: true if the repeated retransmit failures happens, otherwise
1051 * false 1046 * false
1052 */ 1047 */
1053static bool link_retransmit_failure(struct tipc_link *l, struct tipc_link *r, 1048static bool link_retransmit_failure(struct tipc_link *l, struct tipc_link *r,
1054 u16 from, int *rc) 1049 int *rc)
1055{ 1050{
1056 struct sk_buff *skb = skb_peek(&l->transmq); 1051 struct sk_buff *skb = skb_peek(&l->transmq);
1057 struct tipc_msg *hdr; 1052 struct tipc_msg *hdr;
1058 1053
1059 if (!skb) 1054 if (!skb)
1060 return false; 1055 return false;
1061 hdr = buf_msg(skb);
1062 1056
1063 /* Detect repeated retransmit failures on same packet */ 1057 if (!TIPC_SKB_CB(skb)->retr_cnt)
1064 if (r->prev_from != from) { 1058 return false;
1065 r->prev_from = from;
1066 r->stale_limit = jiffies + msecs_to_jiffies(r->tolerance);
1067 } else if (time_after(jiffies, r->stale_limit)) {
1068 pr_warn("Retransmission failure on link <%s>\n", l->name);
1069 link_print(l, "State of link ");
1070 pr_info("Failed msg: usr %u, typ %u, len %u, err %u\n",
1071 msg_user(hdr), msg_type(hdr), msg_size(hdr),
1072 msg_errcode(hdr));
1073 pr_info("sqno %u, prev: %x, src: %x\n",
1074 msg_seqno(hdr), msg_prevnode(hdr), msg_orignode(hdr));
1075
1076 trace_tipc_list_dump(&l->transmq, true, "retrans failure!");
1077 trace_tipc_link_dump(l, TIPC_DUMP_NONE, "retrans failure!");
1078 trace_tipc_link_dump(r, TIPC_DUMP_NONE, "retrans failure!");
1079 1059
1080 if (link_is_bc_sndlink(l)) 1060 if (!time_after(jiffies, TIPC_SKB_CB(skb)->retr_stamp +
1081 *rc = TIPC_LINK_DOWN_EVT; 1061 msecs_to_jiffies(r->tolerance)))
1062 return false;
1063
1064 hdr = buf_msg(skb);
1065 if (link_is_bc_sndlink(l) && !less(r->acked, msg_seqno(hdr)))
1066 return false;
1082 1067
1068 pr_warn("Retransmission failure on link <%s>\n", l->name);
1069 link_print(l, "State of link ");
1070 pr_info("Failed msg: usr %u, typ %u, len %u, err %u\n",
1071 msg_user(hdr), msg_type(hdr), msg_size(hdr), msg_errcode(hdr));
1072 pr_info("sqno %u, prev: %x, dest: %x\n",
1073 msg_seqno(hdr), msg_prevnode(hdr), msg_destnode(hdr));
1074 pr_info("retr_stamp %d, retr_cnt %d\n",
1075 jiffies_to_msecs(TIPC_SKB_CB(skb)->retr_stamp),
1076 TIPC_SKB_CB(skb)->retr_cnt);
1077
1078 trace_tipc_list_dump(&l->transmq, true, "retrans failure!");
1079 trace_tipc_link_dump(l, TIPC_DUMP_NONE, "retrans failure!");
1080 trace_tipc_link_dump(r, TIPC_DUMP_NONE, "retrans failure!");
1081
1082 if (link_is_bc_sndlink(l)) {
1083 r->state = LINK_RESET;
1084 *rc = TIPC_LINK_DOWN_EVT;
1085 } else {
1083 *rc = tipc_link_fsm_evt(l, LINK_FAILURE_EVT); 1086 *rc = tipc_link_fsm_evt(l, LINK_FAILURE_EVT);
1084 return true;
1085 } 1087 }
1086 1088
1087 return false; 1089 return true;
1088} 1090}
1089 1091
1090/* tipc_link_bc_retrans() - retransmit zero or more packets 1092/* tipc_link_bc_retrans() - retransmit zero or more packets
@@ -1110,7 +1112,7 @@ static int tipc_link_bc_retrans(struct tipc_link *l, struct tipc_link *r,
1110 1112
1111 trace_tipc_link_retrans(r, from, to, &l->transmq); 1113 trace_tipc_link_retrans(r, from, to, &l->transmq);
1112 1114
1113 if (link_retransmit_failure(l, r, from, &rc)) 1115 if (link_retransmit_failure(l, r, &rc))
1114 return rc; 1116 return rc;
1115 1117
1116 skb_queue_walk(&l->transmq, skb) { 1118 skb_queue_walk(&l->transmq, skb) {
@@ -1119,11 +1121,10 @@ static int tipc_link_bc_retrans(struct tipc_link *l, struct tipc_link *r,
1119 continue; 1121 continue;
1120 if (more(msg_seqno(hdr), to)) 1122 if (more(msg_seqno(hdr), to))
1121 break; 1123 break;
1122 if (link_is_bc_sndlink(l)) { 1124
1123 if (time_before(jiffies, TIPC_SKB_CB(skb)->nxt_retr)) 1125 if (time_before(jiffies, TIPC_SKB_CB(skb)->nxt_retr))
1124 continue; 1126 continue;
1125 TIPC_SKB_CB(skb)->nxt_retr = TIPC_BC_RETR_LIM; 1127 TIPC_SKB_CB(skb)->nxt_retr = TIPC_BC_RETR_LIM;
1126 }
1127 _skb = __pskb_copy(skb, LL_MAX_HEADER + MIN_H_SIZE, GFP_ATOMIC); 1128 _skb = __pskb_copy(skb, LL_MAX_HEADER + MIN_H_SIZE, GFP_ATOMIC);
1128 if (!_skb) 1129 if (!_skb)
1129 return 0; 1130 return 0;
@@ -1133,6 +1134,10 @@ static int tipc_link_bc_retrans(struct tipc_link *l, struct tipc_link *r,
1133 _skb->priority = TC_PRIO_CONTROL; 1134 _skb->priority = TC_PRIO_CONTROL;
1134 __skb_queue_tail(xmitq, _skb); 1135 __skb_queue_tail(xmitq, _skb);
1135 l->stats.retransmitted++; 1136 l->stats.retransmitted++;
1137
1138 /* Increase actual retrans counter & mark first time */
1139 if (!TIPC_SKB_CB(skb)->retr_cnt++)
1140 TIPC_SKB_CB(skb)->retr_stamp = jiffies;
1136 } 1141 }
1137 return 0; 1142 return 0;
1138} 1143}
@@ -1357,12 +1362,10 @@ static int tipc_link_advance_transmq(struct tipc_link *l, u16 acked, u16 gap,
1357 struct tipc_msg *hdr; 1362 struct tipc_msg *hdr;
1358 u16 bc_ack = l->bc_rcvlink->rcv_nxt - 1; 1363 u16 bc_ack = l->bc_rcvlink->rcv_nxt - 1;
1359 u16 ack = l->rcv_nxt - 1; 1364 u16 ack = l->rcv_nxt - 1;
1365 bool passed = false;
1360 u16 seqno, n = 0; 1366 u16 seqno, n = 0;
1361 int rc = 0; 1367 int rc = 0;
1362 1368
1363 if (gap && link_retransmit_failure(l, l, acked + 1, &rc))
1364 return rc;
1365
1366 skb_queue_walk_safe(&l->transmq, skb, tmp) { 1369 skb_queue_walk_safe(&l->transmq, skb, tmp) {
1367 seqno = buf_seqno(skb); 1370 seqno = buf_seqno(skb);
1368 1371
@@ -1372,12 +1375,17 @@ next_gap_ack:
1372 __skb_unlink(skb, &l->transmq); 1375 __skb_unlink(skb, &l->transmq);
1373 kfree_skb(skb); 1376 kfree_skb(skb);
1374 } else if (less_eq(seqno, acked + gap)) { 1377 } else if (less_eq(seqno, acked + gap)) {
1375 /* retransmit skb */ 1378 /* First, check if repeated retrans failures occurs? */
1379 if (!passed && link_retransmit_failure(l, l, &rc))
1380 return rc;
1381 passed = true;
1382
1383 /* retransmit skb if unrestricted*/
1376 if (time_before(jiffies, TIPC_SKB_CB(skb)->nxt_retr)) 1384 if (time_before(jiffies, TIPC_SKB_CB(skb)->nxt_retr))
1377 continue; 1385 continue;
1378 TIPC_SKB_CB(skb)->nxt_retr = TIPC_UC_RETR_TIME; 1386 TIPC_SKB_CB(skb)->nxt_retr = TIPC_UC_RETR_TIME;
1379 1387 _skb = __pskb_copy(skb, LL_MAX_HEADER + MIN_H_SIZE,
1380 _skb = __pskb_copy(skb, MIN_H_SIZE, GFP_ATOMIC); 1388 GFP_ATOMIC);
1381 if (!_skb) 1389 if (!_skb)
1382 continue; 1390 continue;
1383 hdr = buf_msg(_skb); 1391 hdr = buf_msg(_skb);
@@ -1386,6 +1394,10 @@ next_gap_ack:
1386 _skb->priority = TC_PRIO_CONTROL; 1394 _skb->priority = TC_PRIO_CONTROL;
1387 __skb_queue_tail(xmitq, _skb); 1395 __skb_queue_tail(xmitq, _skb);
1388 l->stats.retransmitted++; 1396 l->stats.retransmitted++;
1397
1398 /* Increase actual retrans counter & mark first time */
1399 if (!TIPC_SKB_CB(skb)->retr_cnt++)
1400 TIPC_SKB_CB(skb)->retr_stamp = jiffies;
1389 } else { 1401 } else {
1390 /* retry with Gap ACK blocks if any */ 1402 /* retry with Gap ACK blocks if any */
1391 if (!ga || n >= ga->gack_cnt) 1403 if (!ga || n >= ga->gack_cnt)
@@ -2577,7 +2589,7 @@ int tipc_link_dump(struct tipc_link *l, u16 dqueues, char *buf)
2577 i += scnprintf(buf + i, sz - i, " %x", l->peer_caps); 2589 i += scnprintf(buf + i, sz - i, " %x", l->peer_caps);
2578 i += scnprintf(buf + i, sz - i, " %u", l->silent_intv_cnt); 2590 i += scnprintf(buf + i, sz - i, " %u", l->silent_intv_cnt);
2579 i += scnprintf(buf + i, sz - i, " %u", l->rst_cnt); 2591 i += scnprintf(buf + i, sz - i, " %u", l->rst_cnt);
2580 i += scnprintf(buf + i, sz - i, " %u", l->prev_from); 2592 i += scnprintf(buf + i, sz - i, " %u", 0);
2581 i += scnprintf(buf + i, sz - i, " %u", 0); 2593 i += scnprintf(buf + i, sz - i, " %u", 0);
2582 i += scnprintf(buf + i, sz - i, " %u", l->acked); 2594 i += scnprintf(buf + i, sz - i, " %u", l->acked);
2583 2595
diff --git a/net/tipc/msg.h b/net/tipc/msg.h
index da509f0eb9ca..d7ebc9e955f6 100644
--- a/net/tipc/msg.h
+++ b/net/tipc/msg.h
@@ -102,13 +102,15 @@ struct plist;
102#define TIPC_MEDIA_INFO_OFFSET 5 102#define TIPC_MEDIA_INFO_OFFSET 5
103 103
104struct tipc_skb_cb { 104struct tipc_skb_cb {
105 u32 bytes_read;
106 u32 orig_member;
107 struct sk_buff *tail; 105 struct sk_buff *tail;
108 unsigned long nxt_retr; 106 unsigned long nxt_retr;
109 bool validated; 107 unsigned long retr_stamp;
108 u32 bytes_read;
109 u32 orig_member;
110 u16 chain_imp; 110 u16 chain_imp;
111 u16 ackers; 111 u16 ackers;
112 u16 retr_cnt;
113 bool validated;
112}; 114};
113 115
114#define TIPC_SKB_CB(__skb) ((struct tipc_skb_cb *)&((__skb)->cb[0])) 116#define TIPC_SKB_CB(__skb) ((struct tipc_skb_cb *)&((__skb)->cb[0]))
diff --git a/net/tipc/netlink_compat.c b/net/tipc/netlink_compat.c
index d86030ef1232..e135d4e11231 100644
--- a/net/tipc/netlink_compat.c
+++ b/net/tipc/netlink_compat.c
@@ -55,6 +55,7 @@ struct tipc_nl_compat_msg {
55 int rep_type; 55 int rep_type;
56 int rep_size; 56 int rep_size;
57 int req_type; 57 int req_type;
58 int req_size;
58 struct net *net; 59 struct net *net;
59 struct sk_buff *rep; 60 struct sk_buff *rep;
60 struct tlv_desc *req; 61 struct tlv_desc *req;
@@ -257,7 +258,8 @@ static int tipc_nl_compat_dumpit(struct tipc_nl_compat_cmd_dump *cmd,
257 int err; 258 int err;
258 struct sk_buff *arg; 259 struct sk_buff *arg;
259 260
260 if (msg->req_type && !TLV_CHECK_TYPE(msg->req, msg->req_type)) 261 if (msg->req_type && (!msg->req_size ||
262 !TLV_CHECK_TYPE(msg->req, msg->req_type)))
261 return -EINVAL; 263 return -EINVAL;
262 264
263 msg->rep = tipc_tlv_alloc(msg->rep_size); 265 msg->rep = tipc_tlv_alloc(msg->rep_size);
@@ -354,7 +356,8 @@ static int tipc_nl_compat_doit(struct tipc_nl_compat_cmd_doit *cmd,
354{ 356{
355 int err; 357 int err;
356 358
357 if (msg->req_type && !TLV_CHECK_TYPE(msg->req, msg->req_type)) 359 if (msg->req_type && (!msg->req_size ||
360 !TLV_CHECK_TYPE(msg->req, msg->req_type)))
358 return -EINVAL; 361 return -EINVAL;
359 362
360 err = __tipc_nl_compat_doit(cmd, msg); 363 err = __tipc_nl_compat_doit(cmd, msg);
@@ -1278,8 +1281,8 @@ static int tipc_nl_compat_recv(struct sk_buff *skb, struct genl_info *info)
1278 goto send; 1281 goto send;
1279 } 1282 }
1280 1283
1281 len = nlmsg_attrlen(req_nlh, GENL_HDRLEN + TIPC_GENL_HDRLEN); 1284 msg.req_size = nlmsg_attrlen(req_nlh, GENL_HDRLEN + TIPC_GENL_HDRLEN);
1282 if (!len || !TLV_OK(msg.req, len)) { 1285 if (msg.req_size && !TLV_OK(msg.req, msg.req_size)) {
1283 msg.rep = tipc_get_err_tlv(TIPC_CFG_NOT_SUPPORTED); 1286 msg.rep = tipc_get_err_tlv(TIPC_CFG_NOT_SUPPORTED);
1284 err = -EOPNOTSUPP; 1287 err = -EOPNOTSUPP;
1285 goto send; 1288 goto send;
diff --git a/net/tipc/socket.c b/net/tipc/socket.c
index dd8537f988c4..83ae41d7e554 100644
--- a/net/tipc/socket.c
+++ b/net/tipc/socket.c
@@ -485,9 +485,8 @@ static int tipc_sk_create(struct net *net, struct socket *sock,
485 tsk_set_unreturnable(tsk, true); 485 tsk_set_unreturnable(tsk, true);
486 if (sock->type == SOCK_DGRAM) 486 if (sock->type == SOCK_DGRAM)
487 tsk_set_unreliable(tsk, true); 487 tsk_set_unreliable(tsk, true);
488 __skb_queue_head_init(&tsk->mc_method.deferredq);
489 } 488 }
490 489 __skb_queue_head_init(&tsk->mc_method.deferredq);
491 trace_tipc_sk_create(sk, NULL, TIPC_DUMP_NONE, " "); 490 trace_tipc_sk_create(sk, NULL, TIPC_DUMP_NONE, " ");
492 return 0; 491 return 0;
493} 492}
diff --git a/net/tipc/topsrv.c b/net/tipc/topsrv.c
index f345662890a6..ca8ac96d22a9 100644
--- a/net/tipc/topsrv.c
+++ b/net/tipc/topsrv.c
@@ -476,7 +476,7 @@ static void tipc_topsrv_accept(struct work_struct *work)
476 } 476 }
477} 477}
478 478
479/* tipc_toprsv_listener_data_ready - interrupt callback with connection request 479/* tipc_topsrv_listener_data_ready - interrupt callback with connection request
480 * The queued job is launched into tipc_topsrv_accept() 480 * The queued job is launched into tipc_topsrv_accept()
481 */ 481 */
482static void tipc_topsrv_listener_data_ready(struct sock *sk) 482static void tipc_topsrv_listener_data_ready(struct sock *sk)
diff --git a/net/tls/tls_device.c b/net/tls/tls_device.c
index 7c0b2b778703..43922d86e510 100644
--- a/net/tls/tls_device.c
+++ b/net/tls/tls_device.c
@@ -373,9 +373,9 @@ static int tls_push_data(struct sock *sk,
373 struct tls_context *tls_ctx = tls_get_ctx(sk); 373 struct tls_context *tls_ctx = tls_get_ctx(sk);
374 struct tls_prot_info *prot = &tls_ctx->prot_info; 374 struct tls_prot_info *prot = &tls_ctx->prot_info;
375 struct tls_offload_context_tx *ctx = tls_offload_ctx_tx(tls_ctx); 375 struct tls_offload_context_tx *ctx = tls_offload_ctx_tx(tls_ctx);
376 int tls_push_record_flags = flags | MSG_SENDPAGE_NOTLAST;
377 int more = flags & (MSG_SENDPAGE_NOTLAST | MSG_MORE); 376 int more = flags & (MSG_SENDPAGE_NOTLAST | MSG_MORE);
378 struct tls_record_info *record = ctx->open_record; 377 struct tls_record_info *record = ctx->open_record;
378 int tls_push_record_flags;
379 struct page_frag *pfrag; 379 struct page_frag *pfrag;
380 size_t orig_size = size; 380 size_t orig_size = size;
381 u32 max_open_record_len; 381 u32 max_open_record_len;
@@ -390,6 +390,9 @@ static int tls_push_data(struct sock *sk,
390 if (sk->sk_err) 390 if (sk->sk_err)
391 return -sk->sk_err; 391 return -sk->sk_err;
392 392
393 flags |= MSG_SENDPAGE_DECRYPTED;
394 tls_push_record_flags = flags | MSG_SENDPAGE_NOTLAST;
395
393 timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT); 396 timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);
394 if (tls_is_partially_sent_record(tls_ctx)) { 397 if (tls_is_partially_sent_record(tls_ctx)) {
395 rc = tls_push_partial_record(sk, tls_ctx, flags); 398 rc = tls_push_partial_record(sk, tls_ctx, flags);
@@ -576,7 +579,9 @@ void tls_device_write_space(struct sock *sk, struct tls_context *ctx)
576 gfp_t sk_allocation = sk->sk_allocation; 579 gfp_t sk_allocation = sk->sk_allocation;
577 580
578 sk->sk_allocation = GFP_ATOMIC; 581 sk->sk_allocation = GFP_ATOMIC;
579 tls_push_partial_record(sk, ctx, MSG_DONTWAIT | MSG_NOSIGNAL); 582 tls_push_partial_record(sk, ctx,
583 MSG_DONTWAIT | MSG_NOSIGNAL |
584 MSG_SENDPAGE_DECRYPTED);
580 sk->sk_allocation = sk_allocation; 585 sk->sk_allocation = sk_allocation;
581 } 586 }
582} 587}
diff --git a/net/tls/tls_main.c b/net/tls/tls_main.c
index 4674e57e66b0..43252a801c3f 100644
--- a/net/tls/tls_main.c
+++ b/net/tls/tls_main.c
@@ -261,24 +261,9 @@ void tls_ctx_free(struct tls_context *ctx)
261 kfree(ctx); 261 kfree(ctx);
262} 262}
263 263
264static void tls_sk_proto_close(struct sock *sk, long timeout) 264static void tls_sk_proto_cleanup(struct sock *sk,
265 struct tls_context *ctx, long timeo)
265{ 266{
266 struct tls_context *ctx = tls_get_ctx(sk);
267 long timeo = sock_sndtimeo(sk, 0);
268 void (*sk_proto_close)(struct sock *sk, long timeout);
269 bool free_ctx = false;
270
271 lock_sock(sk);
272 sk_proto_close = ctx->sk_proto_close;
273
274 if (ctx->tx_conf == TLS_HW_RECORD && ctx->rx_conf == TLS_HW_RECORD)
275 goto skip_tx_cleanup;
276
277 if (ctx->tx_conf == TLS_BASE && ctx->rx_conf == TLS_BASE) {
278 free_ctx = true;
279 goto skip_tx_cleanup;
280 }
281
282 if (unlikely(sk->sk_write_pending) && 267 if (unlikely(sk->sk_write_pending) &&
283 !wait_on_pending_writer(sk, &timeo)) 268 !wait_on_pending_writer(sk, &timeo))
284 tls_handle_open_record(sk, 0); 269 tls_handle_open_record(sk, 0);
@@ -287,7 +272,7 @@ static void tls_sk_proto_close(struct sock *sk, long timeout)
287 if (ctx->tx_conf == TLS_SW) { 272 if (ctx->tx_conf == TLS_SW) {
288 kfree(ctx->tx.rec_seq); 273 kfree(ctx->tx.rec_seq);
289 kfree(ctx->tx.iv); 274 kfree(ctx->tx.iv);
290 tls_sw_free_resources_tx(sk); 275 tls_sw_release_resources_tx(sk);
291#ifdef CONFIG_TLS_DEVICE 276#ifdef CONFIG_TLS_DEVICE
292 } else if (ctx->tx_conf == TLS_HW) { 277 } else if (ctx->tx_conf == TLS_HW) {
293 tls_device_free_resources_tx(sk); 278 tls_device_free_resources_tx(sk);
@@ -295,26 +280,46 @@ static void tls_sk_proto_close(struct sock *sk, long timeout)
295 } 280 }
296 281
297 if (ctx->rx_conf == TLS_SW) 282 if (ctx->rx_conf == TLS_SW)
298 tls_sw_free_resources_rx(sk); 283 tls_sw_release_resources_rx(sk);
299 284
300#ifdef CONFIG_TLS_DEVICE 285#ifdef CONFIG_TLS_DEVICE
301 if (ctx->rx_conf == TLS_HW) 286 if (ctx->rx_conf == TLS_HW)
302 tls_device_offload_cleanup_rx(sk); 287 tls_device_offload_cleanup_rx(sk);
303
304 if (ctx->tx_conf != TLS_HW && ctx->rx_conf != TLS_HW) {
305#else
306 {
307#endif 288#endif
308 tls_ctx_free(ctx); 289}
309 ctx = NULL;
310 }
311 290
312skip_tx_cleanup: 291static void tls_sk_proto_close(struct sock *sk, long timeout)
292{
293 struct inet_connection_sock *icsk = inet_csk(sk);
294 struct tls_context *ctx = tls_get_ctx(sk);
295 long timeo = sock_sndtimeo(sk, 0);
296 bool free_ctx;
297
298 if (ctx->tx_conf == TLS_SW)
299 tls_sw_cancel_work_tx(ctx);
300
301 lock_sock(sk);
302 free_ctx = ctx->tx_conf != TLS_HW && ctx->rx_conf != TLS_HW;
303
304 if (ctx->tx_conf != TLS_BASE || ctx->rx_conf != TLS_BASE)
305 tls_sk_proto_cleanup(sk, ctx, timeo);
306
307 write_lock_bh(&sk->sk_callback_lock);
308 if (free_ctx)
309 icsk->icsk_ulp_data = NULL;
310 sk->sk_prot = ctx->sk_proto;
311 if (sk->sk_write_space == tls_write_space)
312 sk->sk_write_space = ctx->sk_write_space;
313 write_unlock_bh(&sk->sk_callback_lock);
313 release_sock(sk); 314 release_sock(sk);
314 sk_proto_close(sk, timeout); 315 if (ctx->tx_conf == TLS_SW)
315 /* free ctx for TLS_HW_RECORD, used by tcp_set_state 316 tls_sw_free_ctx_tx(ctx);
316 * for sk->sk_prot->unhash [tls_hw_unhash] 317 if (ctx->rx_conf == TLS_SW || ctx->rx_conf == TLS_HW)
317 */ 318 tls_sw_strparser_done(ctx);
319 if (ctx->rx_conf == TLS_SW)
320 tls_sw_free_ctx_rx(ctx);
321 ctx->sk_proto_close(sk, timeout);
322
318 if (free_ctx) 323 if (free_ctx)
319 tls_ctx_free(ctx); 324 tls_ctx_free(ctx);
320} 325}
@@ -526,6 +531,8 @@ static int do_tls_setsockopt_conf(struct sock *sk, char __user *optval,
526 { 531 {
527#endif 532#endif
528 rc = tls_set_sw_offload(sk, ctx, 1); 533 rc = tls_set_sw_offload(sk, ctx, 1);
534 if (rc)
535 goto err_crypto_info;
529 conf = TLS_SW; 536 conf = TLS_SW;
530 } 537 }
531 } else { 538 } else {
@@ -537,13 +544,13 @@ static int do_tls_setsockopt_conf(struct sock *sk, char __user *optval,
537 { 544 {
538#endif 545#endif
539 rc = tls_set_sw_offload(sk, ctx, 0); 546 rc = tls_set_sw_offload(sk, ctx, 0);
547 if (rc)
548 goto err_crypto_info;
540 conf = TLS_SW; 549 conf = TLS_SW;
541 } 550 }
551 tls_sw_strparser_arm(sk, ctx);
542 } 552 }
543 553
544 if (rc)
545 goto err_crypto_info;
546
547 if (tx) 554 if (tx)
548 ctx->tx_conf = conf; 555 ctx->tx_conf = conf;
549 else 556 else
@@ -607,6 +614,7 @@ static struct tls_context *create_ctx(struct sock *sk)
607 ctx->setsockopt = sk->sk_prot->setsockopt; 614 ctx->setsockopt = sk->sk_prot->setsockopt;
608 ctx->getsockopt = sk->sk_prot->getsockopt; 615 ctx->getsockopt = sk->sk_prot->getsockopt;
609 ctx->sk_proto_close = sk->sk_prot->close; 616 ctx->sk_proto_close = sk->sk_prot->close;
617 ctx->unhash = sk->sk_prot->unhash;
610 return ctx; 618 return ctx;
611} 619}
612 620
@@ -764,7 +772,6 @@ static void build_protos(struct proto prot[TLS_NUM_CONFIG][TLS_NUM_CONFIG],
764 prot[TLS_HW_RECORD][TLS_HW_RECORD] = *base; 772 prot[TLS_HW_RECORD][TLS_HW_RECORD] = *base;
765 prot[TLS_HW_RECORD][TLS_HW_RECORD].hash = tls_hw_hash; 773 prot[TLS_HW_RECORD][TLS_HW_RECORD].hash = tls_hw_hash;
766 prot[TLS_HW_RECORD][TLS_HW_RECORD].unhash = tls_hw_unhash; 774 prot[TLS_HW_RECORD][TLS_HW_RECORD].unhash = tls_hw_unhash;
767 prot[TLS_HW_RECORD][TLS_HW_RECORD].close = tls_sk_proto_close;
768} 775}
769 776
770static int tls_init(struct sock *sk) 777static int tls_init(struct sock *sk)
@@ -773,7 +780,7 @@ static int tls_init(struct sock *sk)
773 int rc = 0; 780 int rc = 0;
774 781
775 if (tls_hw_prot(sk)) 782 if (tls_hw_prot(sk))
776 goto out; 783 return 0;
777 784
778 /* The TLS ulp is currently supported only for TCP sockets 785 /* The TLS ulp is currently supported only for TCP sockets
779 * in ESTABLISHED state. 786 * in ESTABLISHED state.
@@ -784,21 +791,38 @@ static int tls_init(struct sock *sk)
784 if (sk->sk_state != TCP_ESTABLISHED) 791 if (sk->sk_state != TCP_ESTABLISHED)
785 return -ENOTSUPP; 792 return -ENOTSUPP;
786 793
794 tls_build_proto(sk);
795
787 /* allocate tls context */ 796 /* allocate tls context */
797 write_lock_bh(&sk->sk_callback_lock);
788 ctx = create_ctx(sk); 798 ctx = create_ctx(sk);
789 if (!ctx) { 799 if (!ctx) {
790 rc = -ENOMEM; 800 rc = -ENOMEM;
791 goto out; 801 goto out;
792 } 802 }
793 803
794 tls_build_proto(sk);
795 ctx->tx_conf = TLS_BASE; 804 ctx->tx_conf = TLS_BASE;
796 ctx->rx_conf = TLS_BASE; 805 ctx->rx_conf = TLS_BASE;
806 ctx->sk_proto = sk->sk_prot;
797 update_sk_prot(sk, ctx); 807 update_sk_prot(sk, ctx);
798out: 808out:
809 write_unlock_bh(&sk->sk_callback_lock);
799 return rc; 810 return rc;
800} 811}
801 812
813static void tls_update(struct sock *sk, struct proto *p)
814{
815 struct tls_context *ctx;
816
817 ctx = tls_get_ctx(sk);
818 if (likely(ctx)) {
819 ctx->sk_proto_close = p->close;
820 ctx->sk_proto = p;
821 } else {
822 sk->sk_prot = p;
823 }
824}
825
802void tls_register_device(struct tls_device *device) 826void tls_register_device(struct tls_device *device)
803{ 827{
804 spin_lock_bh(&device_spinlock); 828 spin_lock_bh(&device_spinlock);
@@ -819,6 +843,7 @@ static struct tcp_ulp_ops tcp_tls_ulp_ops __read_mostly = {
819 .name = "tls", 843 .name = "tls",
820 .owner = THIS_MODULE, 844 .owner = THIS_MODULE,
821 .init = tls_init, 845 .init = tls_init,
846 .update = tls_update,
822}; 847};
823 848
824static int __init tls_register(void) 849static int __init tls_register(void)
diff --git a/net/tls/tls_sw.c b/net/tls/tls_sw.c
index 53b4ad94e74a..91d21b048a9b 100644
--- a/net/tls/tls_sw.c
+++ b/net/tls/tls_sw.c
@@ -2054,7 +2054,16 @@ static void tls_data_ready(struct sock *sk)
2054 } 2054 }
2055} 2055}
2056 2056
2057void tls_sw_free_resources_tx(struct sock *sk) 2057void tls_sw_cancel_work_tx(struct tls_context *tls_ctx)
2058{
2059 struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
2060
2061 set_bit(BIT_TX_CLOSING, &ctx->tx_bitmask);
2062 set_bit(BIT_TX_SCHEDULED, &ctx->tx_bitmask);
2063 cancel_delayed_work_sync(&ctx->tx_work.work);
2064}
2065
2066void tls_sw_release_resources_tx(struct sock *sk)
2058{ 2067{
2059 struct tls_context *tls_ctx = tls_get_ctx(sk); 2068 struct tls_context *tls_ctx = tls_get_ctx(sk);
2060 struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx); 2069 struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
@@ -2065,11 +2074,6 @@ void tls_sw_free_resources_tx(struct sock *sk)
2065 if (atomic_read(&ctx->encrypt_pending)) 2074 if (atomic_read(&ctx->encrypt_pending))
2066 crypto_wait_req(-EINPROGRESS, &ctx->async_wait); 2075 crypto_wait_req(-EINPROGRESS, &ctx->async_wait);
2067 2076
2068 release_sock(sk);
2069 cancel_delayed_work_sync(&ctx->tx_work.work);
2070 lock_sock(sk);
2071
2072 /* Tx whatever records we can transmit and abandon the rest */
2073 tls_tx_records(sk, -1); 2077 tls_tx_records(sk, -1);
2074 2078
2075 /* Free up un-sent records in tx_list. First, free 2079 /* Free up un-sent records in tx_list. First, free
@@ -2092,6 +2096,11 @@ void tls_sw_free_resources_tx(struct sock *sk)
2092 2096
2093 crypto_free_aead(ctx->aead_send); 2097 crypto_free_aead(ctx->aead_send);
2094 tls_free_open_rec(sk); 2098 tls_free_open_rec(sk);
2099}
2100
2101void tls_sw_free_ctx_tx(struct tls_context *tls_ctx)
2102{
2103 struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
2095 2104
2096 kfree(ctx); 2105 kfree(ctx);
2097} 2106}
@@ -2110,25 +2119,40 @@ void tls_sw_release_resources_rx(struct sock *sk)
2110 skb_queue_purge(&ctx->rx_list); 2119 skb_queue_purge(&ctx->rx_list);
2111 crypto_free_aead(ctx->aead_recv); 2120 crypto_free_aead(ctx->aead_recv);
2112 strp_stop(&ctx->strp); 2121 strp_stop(&ctx->strp);
2113 write_lock_bh(&sk->sk_callback_lock); 2122 /* If tls_sw_strparser_arm() was not called (cleanup paths)
2114 sk->sk_data_ready = ctx->saved_data_ready; 2123 * we still want to strp_stop(), but sk->sk_data_ready was
2115 write_unlock_bh(&sk->sk_callback_lock); 2124 * never swapped.
2116 release_sock(sk); 2125 */
2117 strp_done(&ctx->strp); 2126 if (ctx->saved_data_ready) {
2118 lock_sock(sk); 2127 write_lock_bh(&sk->sk_callback_lock);
2128 sk->sk_data_ready = ctx->saved_data_ready;
2129 write_unlock_bh(&sk->sk_callback_lock);
2130 }
2119 } 2131 }
2120} 2132}
2121 2133
2122void tls_sw_free_resources_rx(struct sock *sk) 2134void tls_sw_strparser_done(struct tls_context *tls_ctx)
2123{ 2135{
2124 struct tls_context *tls_ctx = tls_get_ctx(sk);
2125 struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx); 2136 struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
2126 2137
2127 tls_sw_release_resources_rx(sk); 2138 strp_done(&ctx->strp);
2139}
2140
2141void tls_sw_free_ctx_rx(struct tls_context *tls_ctx)
2142{
2143 struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
2128 2144
2129 kfree(ctx); 2145 kfree(ctx);
2130} 2146}
2131 2147
2148void tls_sw_free_resources_rx(struct sock *sk)
2149{
2150 struct tls_context *tls_ctx = tls_get_ctx(sk);
2151
2152 tls_sw_release_resources_rx(sk);
2153 tls_sw_free_ctx_rx(tls_ctx);
2154}
2155
2132/* The work handler to transmitt the encrypted records in tx_list */ 2156/* The work handler to transmitt the encrypted records in tx_list */
2133static void tx_work_handler(struct work_struct *work) 2157static void tx_work_handler(struct work_struct *work)
2134{ 2158{
@@ -2137,11 +2161,17 @@ static void tx_work_handler(struct work_struct *work)
2137 struct tx_work, work); 2161 struct tx_work, work);
2138 struct sock *sk = tx_work->sk; 2162 struct sock *sk = tx_work->sk;
2139 struct tls_context *tls_ctx = tls_get_ctx(sk); 2163 struct tls_context *tls_ctx = tls_get_ctx(sk);
2140 struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx); 2164 struct tls_sw_context_tx *ctx;
2141 2165
2142 if (!test_and_clear_bit(BIT_TX_SCHEDULED, &ctx->tx_bitmask)) 2166 if (unlikely(!tls_ctx))
2143 return; 2167 return;
2144 2168
2169 ctx = tls_sw_ctx_tx(tls_ctx);
2170 if (test_bit(BIT_TX_CLOSING, &ctx->tx_bitmask))
2171 return;
2172
2173 if (!test_and_clear_bit(BIT_TX_SCHEDULED, &ctx->tx_bitmask))
2174 return;
2145 lock_sock(sk); 2175 lock_sock(sk);
2146 tls_tx_records(sk, -1); 2176 tls_tx_records(sk, -1);
2147 release_sock(sk); 2177 release_sock(sk);
@@ -2160,6 +2190,18 @@ void tls_sw_write_space(struct sock *sk, struct tls_context *ctx)
2160 } 2190 }
2161} 2191}
2162 2192
2193void tls_sw_strparser_arm(struct sock *sk, struct tls_context *tls_ctx)
2194{
2195 struct tls_sw_context_rx *rx_ctx = tls_sw_ctx_rx(tls_ctx);
2196
2197 write_lock_bh(&sk->sk_callback_lock);
2198 rx_ctx->saved_data_ready = sk->sk_data_ready;
2199 sk->sk_data_ready = tls_data_ready;
2200 write_unlock_bh(&sk->sk_callback_lock);
2201
2202 strp_check_rcv(&rx_ctx->strp);
2203}
2204
2163int tls_set_sw_offload(struct sock *sk, struct tls_context *ctx, int tx) 2205int tls_set_sw_offload(struct sock *sk, struct tls_context *ctx, int tx)
2164{ 2206{
2165 struct tls_context *tls_ctx = tls_get_ctx(sk); 2207 struct tls_context *tls_ctx = tls_get_ctx(sk);
@@ -2357,13 +2399,6 @@ int tls_set_sw_offload(struct sock *sk, struct tls_context *ctx, int tx)
2357 cb.parse_msg = tls_read_size; 2399 cb.parse_msg = tls_read_size;
2358 2400
2359 strp_init(&sw_ctx_rx->strp, sk, &cb); 2401 strp_init(&sw_ctx_rx->strp, sk, &cb);
2360
2361 write_lock_bh(&sk->sk_callback_lock);
2362 sw_ctx_rx->saved_data_ready = sk->sk_data_ready;
2363 sk->sk_data_ready = tls_data_ready;
2364 write_unlock_bh(&sk->sk_callback_lock);
2365
2366 strp_check_rcv(&sw_ctx_rx->strp);
2367 } 2402 }
2368 2403
2369 goto out; 2404 goto out;
diff --git a/net/vmw_vsock/hyperv_transport.c b/net/vmw_vsock/hyperv_transport.c
index f2084e3f7aa4..9d864ebeb7b3 100644
--- a/net/vmw_vsock/hyperv_transport.c
+++ b/net/vmw_vsock/hyperv_transport.c
@@ -312,6 +312,11 @@ static void hvs_close_connection(struct vmbus_channel *chan)
312 lock_sock(sk); 312 lock_sock(sk);
313 hvs_do_close_lock_held(vsock_sk(sk), true); 313 hvs_do_close_lock_held(vsock_sk(sk), true);
314 release_sock(sk); 314 release_sock(sk);
315
316 /* Release the refcnt for the channel that's opened in
317 * hvs_open_connection().
318 */
319 sock_put(sk);
315} 320}
316 321
317static void hvs_open_connection(struct vmbus_channel *chan) 322static void hvs_open_connection(struct vmbus_channel *chan)
@@ -407,6 +412,9 @@ static void hvs_open_connection(struct vmbus_channel *chan)
407 } 412 }
408 413
409 set_per_channel_state(chan, conn_from_host ? new : sk); 414 set_per_channel_state(chan, conn_from_host ? new : sk);
415
416 /* This reference will be dropped by hvs_close_connection(). */
417 sock_hold(conn_from_host ? new : sk);
410 vmbus_set_chn_rescind_callback(chan, hvs_close_connection); 418 vmbus_set_chn_rescind_callback(chan, hvs_close_connection);
411 419
412 /* Set the pending send size to max packet size to always get 420 /* Set the pending send size to max packet size to always get
diff --git a/net/wireless/core.c b/net/wireless/core.c
index 45d9afcff6d5..32b3c719fdfc 100644
--- a/net/wireless/core.c
+++ b/net/wireless/core.c
@@ -1410,10 +1410,8 @@ static int cfg80211_netdev_notifier_call(struct notifier_block *nb,
1410 } 1410 }
1411 break; 1411 break;
1412 case NETDEV_PRE_UP: 1412 case NETDEV_PRE_UP:
1413 if (!(wdev->wiphy->interface_modes & BIT(wdev->iftype)) && 1413 if (!cfg80211_iftype_allowed(wdev->wiphy, wdev->iftype,
1414 !(wdev->iftype == NL80211_IFTYPE_AP_VLAN && 1414 wdev->use_4addr, 0))
1415 rdev->wiphy.flags & WIPHY_FLAG_4ADDR_AP &&
1416 wdev->use_4addr))
1417 return notifier_from_errno(-EOPNOTSUPP); 1415 return notifier_from_errno(-EOPNOTSUPP);
1418 1416
1419 if (rfkill_blocked(rdev->rfkill)) 1417 if (rfkill_blocked(rdev->rfkill))
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
index fc83dd179c1a..fd05ae1437a9 100644
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@ -3484,9 +3484,7 @@ static int nl80211_new_interface(struct sk_buff *skb, struct genl_info *info)
3484 return err; 3484 return err;
3485 } 3485 }
3486 3486
3487 if (!(rdev->wiphy.interface_modes & (1 << type)) && 3487 if (!cfg80211_iftype_allowed(&rdev->wiphy, type, params.use_4addr, 0))
3488 !(type == NL80211_IFTYPE_AP_VLAN && params.use_4addr &&
3489 rdev->wiphy.flags & WIPHY_FLAG_4ADDR_AP))
3490 return -EOPNOTSUPP; 3488 return -EOPNOTSUPP;
3491 3489
3492 err = nl80211_parse_mon_options(rdev, type, info, &params); 3490 err = nl80211_parse_mon_options(rdev, type, info, &params);
diff --git a/net/wireless/reg.c b/net/wireless/reg.c
index 4831ad745f91..327479ce69f5 100644
--- a/net/wireless/reg.c
+++ b/net/wireless/reg.c
@@ -2788,7 +2788,7 @@ static void reg_process_pending_hints(void)
2788 2788
2789 /* When last_request->processed becomes true this will be rescheduled */ 2789 /* When last_request->processed becomes true this will be rescheduled */
2790 if (lr && !lr->processed) { 2790 if (lr && !lr->processed) {
2791 reg_process_hint(lr); 2791 pr_debug("Pending regulatory request, waiting for it to be processed...\n");
2792 return; 2792 return;
2793 } 2793 }
2794 2794
diff --git a/net/wireless/util.c b/net/wireless/util.c
index 1c39d6a2e850..e74837824cea 100644
--- a/net/wireless/util.c
+++ b/net/wireless/util.c
@@ -233,25 +233,30 @@ int cfg80211_validate_key_settings(struct cfg80211_registered_device *rdev,
233 233
234 switch (params->cipher) { 234 switch (params->cipher) {
235 case WLAN_CIPHER_SUITE_TKIP: 235 case WLAN_CIPHER_SUITE_TKIP:
236 /* Extended Key ID can only be used with CCMP/GCMP ciphers */
237 if ((pairwise && key_idx) ||
238 params->mode != NL80211_KEY_RX_TX)
239 return -EINVAL;
240 break;
236 case WLAN_CIPHER_SUITE_CCMP: 241 case WLAN_CIPHER_SUITE_CCMP:
237 case WLAN_CIPHER_SUITE_CCMP_256: 242 case WLAN_CIPHER_SUITE_CCMP_256:
238 case WLAN_CIPHER_SUITE_GCMP: 243 case WLAN_CIPHER_SUITE_GCMP:
239 case WLAN_CIPHER_SUITE_GCMP_256: 244 case WLAN_CIPHER_SUITE_GCMP_256:
240 /* IEEE802.11-2016 allows only 0 and - when using Extended Key 245 /* IEEE802.11-2016 allows only 0 and - when supporting
241 * ID - 1 as index for pairwise keys. 246 * Extended Key ID - 1 as index for pairwise keys.
242 * @NL80211_KEY_NO_TX is only allowed for pairwise keys when 247 * @NL80211_KEY_NO_TX is only allowed for pairwise keys when
243 * the driver supports Extended Key ID. 248 * the driver supports Extended Key ID.
244 * @NL80211_KEY_SET_TX can't be set when installing and 249 * @NL80211_KEY_SET_TX can't be set when installing and
245 * validating a key. 250 * validating a key.
246 */ 251 */
247 if (params->mode == NL80211_KEY_NO_TX) { 252 if ((params->mode == NL80211_KEY_NO_TX && !pairwise) ||
248 if (!wiphy_ext_feature_isset(&rdev->wiphy, 253 params->mode == NL80211_KEY_SET_TX)
249 NL80211_EXT_FEATURE_EXT_KEY_ID)) 254 return -EINVAL;
250 return -EINVAL; 255 if (wiphy_ext_feature_isset(&rdev->wiphy,
251 else if (!pairwise || key_idx < 0 || key_idx > 1) 256 NL80211_EXT_FEATURE_EXT_KEY_ID)) {
257 if (pairwise && (key_idx < 0 || key_idx > 1))
252 return -EINVAL; 258 return -EINVAL;
253 } else if ((pairwise && key_idx) || 259 } else if (pairwise && key_idx) {
254 params->mode == NL80211_KEY_SET_TX) {
255 return -EINVAL; 260 return -EINVAL;
256 } 261 }
257 break; 262 break;
@@ -1697,7 +1702,7 @@ int cfg80211_iter_combinations(struct wiphy *wiphy,
1697 for (iftype = 0; iftype < NUM_NL80211_IFTYPES; iftype++) { 1702 for (iftype = 0; iftype < NUM_NL80211_IFTYPES; iftype++) {
1698 num_interfaces += params->iftype_num[iftype]; 1703 num_interfaces += params->iftype_num[iftype];
1699 if (params->iftype_num[iftype] > 0 && 1704 if (params->iftype_num[iftype] > 0 &&
1700 !(wiphy->software_iftypes & BIT(iftype))) 1705 !cfg80211_iftype_allowed(wiphy, iftype, 0, 1))
1701 used_iftypes |= BIT(iftype); 1706 used_iftypes |= BIT(iftype);
1702 } 1707 }
1703 1708
@@ -1719,7 +1724,7 @@ int cfg80211_iter_combinations(struct wiphy *wiphy,
1719 return -ENOMEM; 1724 return -ENOMEM;
1720 1725
1721 for (iftype = 0; iftype < NUM_NL80211_IFTYPES; iftype++) { 1726 for (iftype = 0; iftype < NUM_NL80211_IFTYPES; iftype++) {
1722 if (wiphy->software_iftypes & BIT(iftype)) 1727 if (cfg80211_iftype_allowed(wiphy, iftype, 0, 1))
1723 continue; 1728 continue;
1724 for (j = 0; j < c->n_limits; j++) { 1729 for (j = 0; j < c->n_limits; j++) {
1725 all_iftypes |= limits[j].types; 1730 all_iftypes |= limits[j].types;
@@ -2072,3 +2077,26 @@ int ieee80211_get_vht_max_nss(struct ieee80211_vht_cap *cap,
2072 return max_vht_nss; 2077 return max_vht_nss;
2073} 2078}
2074EXPORT_SYMBOL(ieee80211_get_vht_max_nss); 2079EXPORT_SYMBOL(ieee80211_get_vht_max_nss);
2080
2081bool cfg80211_iftype_allowed(struct wiphy *wiphy, enum nl80211_iftype iftype,
2082 bool is_4addr, u8 check_swif)
2083
2084{
2085 bool is_vlan = iftype == NL80211_IFTYPE_AP_VLAN;
2086
2087 switch (check_swif) {
2088 case 0:
2089 if (is_vlan && is_4addr)
2090 return wiphy->flags & WIPHY_FLAG_4ADDR_AP;
2091 return wiphy->interface_modes & BIT(iftype);
2092 case 1:
2093 if (!(wiphy->software_iftypes & BIT(iftype)) && is_vlan)
2094 return wiphy->flags & WIPHY_FLAG_4ADDR_AP;
2095 return wiphy->software_iftypes & BIT(iftype);
2096 default:
2097 break;
2098 }
2099
2100 return false;
2101}
2102EXPORT_SYMBOL(cfg80211_iftype_allowed);
diff --git a/net/xdp/xdp_umem.c b/net/xdp/xdp_umem.c
index 83de74ca729a..688aac7a6943 100644
--- a/net/xdp/xdp_umem.c
+++ b/net/xdp/xdp_umem.c
@@ -365,7 +365,7 @@ static int xdp_umem_reg(struct xdp_umem *umem, struct xdp_umem_reg *mr)
365 umem->pages = kcalloc(umem->npgs, sizeof(*umem->pages), GFP_KERNEL); 365 umem->pages = kcalloc(umem->npgs, sizeof(*umem->pages), GFP_KERNEL);
366 if (!umem->pages) { 366 if (!umem->pages) {
367 err = -ENOMEM; 367 err = -ENOMEM;
368 goto out_account; 368 goto out_pin;
369 } 369 }
370 370
371 for (i = 0; i < umem->npgs; i++) 371 for (i = 0; i < umem->npgs; i++)
@@ -373,6 +373,8 @@ static int xdp_umem_reg(struct xdp_umem *umem, struct xdp_umem_reg *mr)
373 373
374 return 0; 374 return 0;
375 375
376out_pin:
377 xdp_umem_unpin_pages(umem);
376out_account: 378out_account:
377 xdp_umem_unaccount_pages(umem); 379 xdp_umem_unaccount_pages(umem);
378 return err; 380 return err;
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
index 8ca637a72697..ec94f5795ea4 100644
--- a/net/xfrm/xfrm_policy.c
+++ b/net/xfrm/xfrm_policy.c
@@ -3269,7 +3269,7 @@ decode_session4(struct sk_buff *skb, struct flowi *fl, bool reverse)
3269 struct flowi4 *fl4 = &fl->u.ip4; 3269 struct flowi4 *fl4 = &fl->u.ip4;
3270 int oif = 0; 3270 int oif = 0;
3271 3271
3272 if (skb_dst(skb)) 3272 if (skb_dst(skb) && skb_dst(skb)->dev)
3273 oif = skb_dst(skb)->dev->ifindex; 3273 oif = skb_dst(skb)->dev->ifindex;
3274 3274
3275 memset(fl4, 0, sizeof(struct flowi4)); 3275 memset(fl4, 0, sizeof(struct flowi4));
@@ -3387,7 +3387,7 @@ decode_session6(struct sk_buff *skb, struct flowi *fl, bool reverse)
3387 3387
3388 nexthdr = nh[nhoff]; 3388 nexthdr = nh[nhoff];
3389 3389
3390 if (skb_dst(skb)) 3390 if (skb_dst(skb) && skb_dst(skb)->dev)
3391 oif = skb_dst(skb)->dev->ifindex; 3391 oif = skb_dst(skb)->dev->ifindex;
3392 3392
3393 memset(fl6, 0, sizeof(struct flowi6)); 3393 memset(fl6, 0, sizeof(struct flowi6));
diff --git a/samples/auxdisplay/cfag12864b-example.c b/samples/auxdisplay/cfag12864b-example.c
index 85571e90191f..bfeab44f81d0 100644
--- a/samples/auxdisplay/cfag12864b-example.c
+++ b/samples/auxdisplay/cfag12864b-example.c
@@ -245,7 +245,7 @@ int main(int argc, char *argv[])
245 245
246 if (argc != 2) { 246 if (argc != 2) {
247 printf( 247 printf(
248 "Sintax: %s fbdev\n" 248 "Syntax: %s fbdev\n"
249 "Usually: /dev/fb0, /dev/fb1...\n", argv[0]); 249 "Usually: /dev/fb0, /dev/fb1...\n", argv[0]);
250 return -1; 250 return -1;
251 } 251 }
diff --git a/samples/vfio-mdev/mdpy-defs.h b/samples/vfio-mdev/mdpy-defs.h
index 96b3b1b49d34..eb26421b6429 100644
--- a/samples/vfio-mdev/mdpy-defs.h
+++ b/samples/vfio-mdev/mdpy-defs.h
@@ -1,4 +1,4 @@
1/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ 1/* SPDX-License-Identifier: GPL-2.0 */
2/* 2/*
3 * Simple pci display device. 3 * Simple pci display device.
4 * 4 *
diff --git a/scripts/Kbuild.include b/scripts/Kbuild.include
index 77c742fa4fb1..4b0432e095ae 100644
--- a/scripts/Kbuild.include
+++ b/scripts/Kbuild.include
@@ -190,9 +190,6 @@ echo-cmd = $(if $($(quiet)cmd_$(1)),\
190# printing commands 190# printing commands
191cmd = @set -e; $(echo-cmd) $(cmd_$(1)) 191cmd = @set -e; $(echo-cmd) $(cmd_$(1))
192 192
193# Add $(obj)/ for paths that are not absolute
194objectify = $(foreach o,$(1),$(if $(filter /%,$(o)),$(o),$(obj)/$(o)))
195
196### 193###
197# if_changed - execute command if any prerequisite is newer than 194# if_changed - execute command if any prerequisite is newer than
198# target, or command line has changed 195# target, or command line has changed
diff --git a/scripts/Kconfig.include b/scripts/Kconfig.include
index 8a5c4d645eb1..4bbf4fc163a2 100644
--- a/scripts/Kconfig.include
+++ b/scripts/Kconfig.include
@@ -25,7 +25,7 @@ failure = $(if-success,$(1),n,y)
25 25
26# $(cc-option,<flag>) 26# $(cc-option,<flag>)
27# Return y if the compiler supports <flag>, n otherwise 27# Return y if the compiler supports <flag>, n otherwise
28cc-option = $(success,$(CC) -Werror $(1) -E -x c /dev/null -o /dev/null) 28cc-option = $(success,$(CC) -Werror $(CLANG_FLAGS) $(1) -E -x c /dev/null -o /dev/null)
29 29
30# $(ld-option,<flag>) 30# $(ld-option,<flag>)
31# Return y if the linker supports <flag>, n otherwise 31# Return y if the linker supports <flag>, n otherwise
diff --git a/scripts/Makefile.build b/scripts/Makefile.build
index 0d434d0afc0b..2f66ed388d1c 100644
--- a/scripts/Makefile.build
+++ b/scripts/Makefile.build
@@ -52,6 +52,13 @@ ifndef obj
52$(warning kbuild: Makefile.build is included improperly) 52$(warning kbuild: Makefile.build is included improperly)
53endif 53endif
54 54
55ifeq ($(MAKECMDGOALS)$(need-modorder),)
56ifneq ($(obj-m),)
57$(warning $(patsubst %.o,'%.ko',$(obj-m)) will not be built even though obj-m is specified.)
58$(warning You cannot use subdir-y/m to visit a module Makefile. Use obj-y/m instead.)
59endif
60endif
61
55# =========================================================================== 62# ===========================================================================
56 63
57ifneq ($(strip $(lib-y) $(lib-m) $(lib-)),) 64ifneq ($(strip $(lib-y) $(lib-m) $(lib-)),)
@@ -487,7 +494,9 @@ targets += $(call intermediate_targets, .asn1.o, .asn1.c .asn1.h) \
487 494
488PHONY += $(subdir-ym) 495PHONY += $(subdir-ym)
489$(subdir-ym): 496$(subdir-ym):
490 $(Q)$(MAKE) $(build)=$@ need-builtin=$(if $(findstring $@,$(subdir-obj-y)),1) 497 $(Q)$(MAKE) $(build)=$@ \
498 need-builtin=$(if $(filter $@/built-in.a, $(subdir-obj-y)),1) \
499 need-modorder=$(if $(need-modorder),$(if $(filter $@/modules.order, $(modorder)),1))
491 500
492# Add FORCE to the prequisites of a target to force it to be always rebuilt. 501# Add FORCE to the prequisites of a target to force it to be always rebuilt.
493# --------------------------------------------------------------------------- 502# ---------------------------------------------------------------------------
diff --git a/scripts/Makefile.lib b/scripts/Makefile.lib
index 5241d0751eb0..41c50f9461e5 100644
--- a/scripts/Makefile.lib
+++ b/scripts/Makefile.lib
@@ -45,7 +45,6 @@ subdir-ym := $(sort $(subdir-y) $(subdir-m))
45multi-used-y := $(sort $(foreach m,$(obj-y), $(if $(strip $($(m:.o=-objs)) $($(m:.o=-y))), $(m)))) 45multi-used-y := $(sort $(foreach m,$(obj-y), $(if $(strip $($(m:.o=-objs)) $($(m:.o=-y))), $(m))))
46multi-used-m := $(sort $(foreach m,$(obj-m), $(if $(strip $($(m:.o=-objs)) $($(m:.o=-y)) $($(m:.o=-m))), $(m)))) 46multi-used-m := $(sort $(foreach m,$(obj-m), $(if $(strip $($(m:.o=-objs)) $($(m:.o=-y)) $($(m:.o=-m))), $(m))))
47multi-used := $(multi-used-y) $(multi-used-m) 47multi-used := $(multi-used-y) $(multi-used-m)
48single-used-m := $(sort $(filter-out $(multi-used-m),$(obj-m)))
49 48
50# $(subdir-obj-y) is the list of objects in $(obj-y) which uses dir/ to 49# $(subdir-obj-y) is the list of objects in $(obj-y) which uses dir/ to
51# tell kbuild to descend 50# tell kbuild to descend
@@ -91,7 +90,6 @@ lib-y := $(addprefix $(obj)/,$(lib-y))
91subdir-obj-y := $(addprefix $(obj)/,$(subdir-obj-y)) 90subdir-obj-y := $(addprefix $(obj)/,$(subdir-obj-y))
92real-obj-y := $(addprefix $(obj)/,$(real-obj-y)) 91real-obj-y := $(addprefix $(obj)/,$(real-obj-y))
93real-obj-m := $(addprefix $(obj)/,$(real-obj-m)) 92real-obj-m := $(addprefix $(obj)/,$(real-obj-m))
94single-used-m := $(addprefix $(obj)/,$(single-used-m))
95multi-used-m := $(addprefix $(obj)/,$(multi-used-m)) 93multi-used-m := $(addprefix $(obj)/,$(multi-used-m))
96subdir-ym := $(addprefix $(obj)/,$(subdir-ym)) 94subdir-ym := $(addprefix $(obj)/,$(subdir-ym))
97 95
diff --git a/scripts/Makefile.modpost b/scripts/Makefile.modpost
index 6b19c1a4eae5..26e6574ecd08 100644
--- a/scripts/Makefile.modpost
+++ b/scripts/Makefile.modpost
@@ -38,12 +38,39 @@
38# symbols in the final module linking stage 38# symbols in the final module linking stage
39# KBUILD_MODPOST_NOFINAL can be set to skip the final link of modules. 39# KBUILD_MODPOST_NOFINAL can be set to skip the final link of modules.
40# This is solely useful to speed up test compiles 40# This is solely useful to speed up test compiles
41PHONY := _modpost 41
42_modpost: __modpost 42PHONY := __modpost
43__modpost:
43 44
44include include/config/auto.conf 45include include/config/auto.conf
45include scripts/Kbuild.include 46include scripts/Kbuild.include
46 47
48kernelsymfile := $(objtree)/Module.symvers
49modulesymfile := $(firstword $(KBUILD_EXTMOD))/Module.symvers
50
51MODPOST = scripts/mod/modpost \
52 $(if $(CONFIG_MODVERSIONS),-m) \
53 $(if $(CONFIG_MODULE_SRCVERSION_ALL),-a) \
54 $(if $(KBUILD_EXTMOD),-i,-o) $(kernelsymfile) \
55 $(if $(KBUILD_EXTMOD),-I $(modulesymfile)) \
56 $(if $(KBUILD_EXTMOD),$(addprefix -e ,$(KBUILD_EXTRA_SYMBOLS))) \
57 $(if $(KBUILD_EXTMOD),-o $(modulesymfile)) \
58 $(if $(CONFIG_SECTION_MISMATCH_WARN_ONLY),,-E) \
59 $(if $(KBUILD_MODPOST_WARN),-w)
60
61ifdef MODPOST_VMLINUX
62
63__modpost: vmlinux.o
64
65quiet_cmd_modpost = MODPOST $@
66 cmd_modpost = $(MODPOST) $@
67
68PHONY += vmlinux.o
69vmlinux.o:
70 $(call cmd,modpost)
71
72else
73
47# When building external modules load the Kbuild file to retrieve EXTRA_SYMBOLS info 74# When building external modules load the Kbuild file to retrieve EXTRA_SYMBOLS info
48ifneq ($(KBUILD_EXTMOD),) 75ifneq ($(KBUILD_EXTMOD),)
49 76
@@ -58,50 +85,25 @@ endif
58 85
59include scripts/Makefile.lib 86include scripts/Makefile.lib
60 87
61kernelsymfile := $(objtree)/Module.symvers 88# find all modules listed in modules.order
62modulesymfile := $(firstword $(KBUILD_EXTMOD))/Module.symvers 89modules := $(sort $(shell cat $(MODORDER)))
63
64modorder := $(if $(KBUILD_EXTMOD),$(KBUILD_EXTMOD)/)modules.order
65
66# Step 1), find all modules listed in modules.order
67ifdef CONFIG_MODULES
68modules := $(sort $(shell cat $(modorder)))
69endif
70 90
71# Stop after building .o files if NOFINAL is set. Makes compile tests quicker 91# Stop after building .o files if NOFINAL is set. Makes compile tests quicker
72_modpost: $(if $(KBUILD_MODPOST_NOFINAL), $(modules:.ko:.o),$(modules)) 92__modpost: $(if $(KBUILD_MODPOST_NOFINAL), $(modules:.ko:.o),$(modules))
73 93 @:
74# Step 2), invoke modpost
75# Includes step 3,4
76modpost = scripts/mod/modpost \
77 $(if $(CONFIG_MODVERSIONS),-m) \
78 $(if $(CONFIG_MODULE_SRCVERSION_ALL),-a,) \
79 $(if $(KBUILD_EXTMOD),-i,-o) $(kernelsymfile) \
80 $(if $(KBUILD_EXTMOD),-I $(modulesymfile)) \
81 $(if $(KBUILD_EXTRA_SYMBOLS), $(patsubst %, -e %,$(KBUILD_EXTRA_SYMBOLS))) \
82 $(if $(KBUILD_EXTMOD),-o $(modulesymfile)) \
83 $(if $(CONFIG_SECTION_MISMATCH_WARN_ONLY),,-E) \
84 $(if $(KBUILD_MODPOST_WARN),-w)
85
86MODPOST_OPT=$(subst -i,-n,$(filter -i,$(MAKEFLAGS)))
87 94
88# We can go over command line length here, so be careful. 95MODPOST += $(subst -i,-n,$(filter -i,$(MAKEFLAGS))) -s -T - $(wildcard vmlinux)
89quiet_cmd_modpost = MODPOST $(words $(filter-out vmlinux FORCE, $^)) modules
90 cmd_modpost = sed 's/ko$$/o/' $(modorder) | $(modpost) $(MODPOST_OPT) -s -T -
91
92PHONY += __modpost
93__modpost: $(modules:.ko=.o) FORCE
94 $(call cmd,modpost) $(wildcard vmlinux)
95 96
96quiet_cmd_kernel-mod = MODPOST $@ 97# We can go over command line length here, so be careful.
97 cmd_kernel-mod = $(modpost) $@ 98quiet_cmd_modpost = MODPOST $(words $(modules)) modules
99 cmd_modpost = sed 's/ko$$/o/' $(MODORDER) | $(MODPOST)
98 100
99vmlinux.o: FORCE 101PHONY += modules-modpost
100 $(call cmd,kernel-mod) 102modules-modpost:
103 $(call cmd,modpost)
101 104
102# Declare generated files as targets for modpost 105# Declare generated files as targets for modpost
103$(modules:.ko=.mod.c): __modpost ; 106$(modules:.ko=.mod.c): modules-modpost
104
105 107
106# Step 5), compile all *.mod.c files 108# Step 5), compile all *.mod.c files
107 109
@@ -145,10 +147,10 @@ FORCE:
145# optimization, we don't need to read them if the target does not 147# optimization, we don't need to read them if the target does not
146# exist, we will rebuild anyway in that case. 148# exist, we will rebuild anyway in that case.
147 149
148cmd_files := $(wildcard $(foreach f,$(sort $(targets)),$(dir $(f)).$(notdir $(f)).cmd)) 150existing-targets := $(wildcard $(sort $(targets)))
151
152-include $(foreach f,$(existing-targets),$(dir $(f)).$(notdir $(f)).cmd)
149 153
150ifneq ($(cmd_files),)
151 include $(cmd_files)
152endif 154endif
153 155
154.PHONY: $(PHONY) 156.PHONY: $(PHONY)
diff --git a/scripts/coccinelle/api/atomic_as_refcounter.cocci b/scripts/coccinelle/api/atomic_as_refcounter.cocci
index 988120e0fd67..0f78d94abc35 100644
--- a/scripts/coccinelle/api/atomic_as_refcounter.cocci
+++ b/scripts/coccinelle/api/atomic_as_refcounter.cocci
@@ -1,3 +1,4 @@
1// SPDX-License-Identifier: GPL-2.0-only
1// Check if refcount_t type and API should be used 2// Check if refcount_t type and API should be used
2// instead of atomic_t type when dealing with refcounters 3// instead of atomic_t type when dealing with refcounters
3// 4//
diff --git a/scripts/gen_compile_commands.py b/scripts/gen_compile_commands.py
index 7915823b92a5..c458696ef3a7 100755
--- a/scripts/gen_compile_commands.py
+++ b/scripts/gen_compile_commands.py
@@ -21,9 +21,9 @@ _LINE_PATTERN = r'^cmd_[^ ]*\.o := (.* )([^ ]*\.c)$'
21_VALID_LOG_LEVELS = ['DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL'] 21_VALID_LOG_LEVELS = ['DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL']
22 22
23# A kernel build generally has over 2000 entries in its compile_commands.json 23# A kernel build generally has over 2000 entries in its compile_commands.json
24# database. If this code finds 500 or fewer, then warn the user that they might 24# database. If this code finds 300 or fewer, then warn the user that they might
25# not have all the .cmd files, and they might need to compile the kernel. 25# not have all the .cmd files, and they might need to compile the kernel.
26_LOW_COUNT_THRESHOLD = 500 26_LOW_COUNT_THRESHOLD = 300
27 27
28 28
29def parse_arguments(): 29def parse_arguments():
diff --git a/scripts/headers_install.sh b/scripts/headers_install.sh
index 47f6f3ea0771..bbaf29386995 100755
--- a/scripts/headers_install.sh
+++ b/scripts/headers_install.sh
@@ -23,6 +23,12 @@ TMPFILE=$OUTFILE.tmp
23 23
24trap 'rm -f $OUTFILE $TMPFILE' EXIT 24trap 'rm -f $OUTFILE $TMPFILE' EXIT
25 25
26# SPDX-License-Identifier with GPL variants must have "WITH Linux-syscall-note"
27if [ -n "$(sed -n -e "/SPDX-License-Identifier:.*GPL-/{/WITH Linux-syscall-note/!p}" $INFILE)" ]; then
28 echo "error: $INFILE: missing \"WITH Linux-syscall-note\" for SPDX-License-Identifier" >&2
29 exit 1
30fi
31
26sed -E -e ' 32sed -E -e '
27 s/([[:space:](])(__user|__force|__iomem)[[:space:]]/\1/g 33 s/([[:space:](])(__user|__force|__iomem)[[:space:]]/\1/g
28 s/__attribute_const__([[:space:]]|$)/\1/g 34 s/__attribute_const__([[:space:]]|$)/\1/g
diff --git a/scripts/kconfig/confdata.c b/scripts/kconfig/confdata.c
index 1134892599da..3569d2dec37c 100644
--- a/scripts/kconfig/confdata.c
+++ b/scripts/kconfig/confdata.c
@@ -848,6 +848,7 @@ int conf_write(const char *name)
848 const char *str; 848 const char *str;
849 char tmpname[PATH_MAX + 1], oldname[PATH_MAX + 1]; 849 char tmpname[PATH_MAX + 1], oldname[PATH_MAX + 1];
850 char *env; 850 char *env;
851 int i;
851 bool need_newline = false; 852 bool need_newline = false;
852 853
853 if (!name) 854 if (!name)
@@ -930,6 +931,9 @@ next:
930 } 931 }
931 fclose(out); 932 fclose(out);
932 933
934 for_all_symbols(i, sym)
935 sym->flags &= ~SYMBOL_WRITTEN;
936
933 if (*tmpname) { 937 if (*tmpname) {
934 if (is_same(name, tmpname)) { 938 if (is_same(name, tmpname)) {
935 conf_message("No change to %s", name); 939 conf_message("No change to %s", name);
diff --git a/scripts/link-vmlinux.sh b/scripts/link-vmlinux.sh
index a7124f895b24..915775eb2921 100755
--- a/scripts/link-vmlinux.sh
+++ b/scripts/link-vmlinux.sh
@@ -210,7 +210,7 @@ info LD vmlinux.o
210modpost_link vmlinux.o 210modpost_link vmlinux.o
211 211
212# modpost vmlinux.o to check for section mismatches 212# modpost vmlinux.o to check for section mismatches
213${MAKE} -f "${srctree}/scripts/Makefile.modpost" vmlinux.o 213${MAKE} -f "${srctree}/scripts/Makefile.modpost" MODPOST_VMLINUX=1
214 214
215info MODINFO modules.builtin.modinfo 215info MODINFO modules.builtin.modinfo
216${OBJCOPY} -j .modinfo -O binary vmlinux.o modules.builtin.modinfo 216${OBJCOPY} -j .modinfo -O binary vmlinux.o modules.builtin.modinfo
diff --git a/scripts/sphinx-pre-install b/scripts/sphinx-pre-install
index f230e65329a2..3b638c0e1a4f 100755
--- a/scripts/sphinx-pre-install
+++ b/scripts/sphinx-pre-install
@@ -83,6 +83,17 @@ sub check_missing(%)
83 foreach my $prog (sort keys %missing) { 83 foreach my $prog (sort keys %missing) {
84 my $is_optional = $missing{$prog}; 84 my $is_optional = $missing{$prog};
85 85
86 # At least on some LTS distros like CentOS 7, texlive doesn't
87 # provide all packages we need. When such distros are
88 # detected, we have to disable PDF output.
89 #
90 # So, we need to ignore the packages that distros would
91 # need for LaTeX to work
92 if ($is_optional == 2 && !$pdf) {
93 $optional--;
94 next;
95 }
96
86 if ($is_optional) { 97 if ($is_optional) {
87 print "Warning: better to also install \"$prog\".\n"; 98 print "Warning: better to also install \"$prog\".\n";
88 } else { 99 } else {
@@ -333,10 +344,13 @@ sub give_debian_hints()
333 344
334 if ($pdf) { 345 if ($pdf) {
335 check_missing_file("/usr/share/fonts/truetype/dejavu/DejaVuSans.ttf", 346 check_missing_file("/usr/share/fonts/truetype/dejavu/DejaVuSans.ttf",
336 "fonts-dejavu", 1); 347 "fonts-dejavu", 2);
348
349 check_missing_file("/usr/share/fonts/noto-cjk/NotoSansCJK-Regular.ttc",
350 "fonts-noto-cjk", 2);
337 } 351 }
338 352
339 check_program("dvipng", 1) if ($pdf); 353 check_program("dvipng", 2) if ($pdf);
340 check_missing(\%map); 354 check_missing(\%map);
341 355
342 return if (!$need && !$optional); 356 return if (!$need && !$optional);
@@ -363,6 +377,7 @@ sub give_redhat_hints()
363 my @fedora_tex_pkgs = ( 377 my @fedora_tex_pkgs = (
364 "texlive-collection-fontsrecommended", 378 "texlive-collection-fontsrecommended",
365 "texlive-collection-latex", 379 "texlive-collection-latex",
380 "texlive-xecjk",
366 "dejavu-sans-fonts", 381 "dejavu-sans-fonts",
367 "dejavu-serif-fonts", 382 "dejavu-serif-fonts",
368 "dejavu-sans-mono-fonts", 383 "dejavu-sans-mono-fonts",
@@ -371,22 +386,45 @@ sub give_redhat_hints()
371 # 386 #
372 # Checks valid for RHEL/CentOS version 7.x. 387 # Checks valid for RHEL/CentOS version 7.x.
373 # 388 #
374 if (! $system_release =~ /Fedora/) { 389 my $old = 0;
390 my $rel;
391 $rel = $1 if ($system_release =~ /release\s+(\d+)/);
392
393 if (!($system_release =~ /Fedora/)) {
375 $map{"virtualenv"} = "python-virtualenv"; 394 $map{"virtualenv"} = "python-virtualenv";
376 }
377 395
378 my $release; 396 if ($rel && $rel < 8) {
397 $old = 1;
398 $pdf = 0;
399
400 printf("Note: texlive packages on RHEL/CENTOS <= 7 are incomplete. Can't support PDF output\n");
401 printf("If you want to build PDF, please read:\n");
402 printf("\thttps://www.systutorials.com/241660/how-to-install-tex-live-on-centos-7-linux/\n");
403 }
404 } else {
405 if ($rel && $rel < 26) {
406 $old = 1;
407 }
408 }
409 if (!$rel) {
410 printf("Couldn't identify release number\n");
411 $old = 1;
412 $pdf = 0;
413 }
379 414
380 $release = $1 if ($system_release =~ /Fedora\s+release\s+(\d+)/); 415 if ($pdf) {
416 check_missing_file("/usr/share/fonts/google-noto-cjk/NotoSansCJK-Regular.ttc",
417 "google-noto-sans-cjk-ttc-fonts", 2);
418 }
381 419
382 check_rpm_missing(\@fedora26_opt_pkgs, 1) if ($pdf && $release >= 26); 420 check_rpm_missing(\@fedora26_opt_pkgs, 2) if ($pdf && !$old);
383 check_rpm_missing(\@fedora_tex_pkgs, 1) if ($pdf); 421 check_rpm_missing(\@fedora_tex_pkgs, 2) if ($pdf);
384 check_missing_tex(1) if ($pdf); 422 check_missing_tex(2) if ($pdf);
385 check_missing(\%map); 423 check_missing(\%map);
386 424
387 return if (!$need && !$optional); 425 return if (!$need && !$optional);
388 426
389 if ($release >= 18) { 427 if (!$old) {
390 # dnf, for Fedora 18+ 428 # dnf, for Fedora 18+
391 printf("You should run:\n\n\tsudo dnf install -y $install\n"); 429 printf("You should run:\n\n\tsudo dnf install -y $install\n");
392 } else { 430 } else {
@@ -425,8 +463,15 @@ sub give_opensuse_hints()
425 "texlive-zapfding", 463 "texlive-zapfding",
426 ); 464 );
427 465
428 check_rpm_missing(\@suse_tex_pkgs, 1) if ($pdf); 466 $map{"latexmk"} = "texlive-latexmk-bin";
429 check_missing_tex(1) if ($pdf); 467
468 # FIXME: add support for installing CJK fonts
469 #
470 # I tried hard, but was unable to find a way to install
471 # "Noto Sans CJK SC" on openSUSE
472
473 check_rpm_missing(\@suse_tex_pkgs, 2) if ($pdf);
474 check_missing_tex(2) if ($pdf);
430 check_missing(\%map); 475 check_missing(\%map);
431 476
432 return if (!$need && !$optional); 477 return if (!$need && !$optional);
@@ -450,7 +495,14 @@ sub give_mageia_hints()
450 "texlive-fontsextra", 495 "texlive-fontsextra",
451 ); 496 );
452 497
453 check_rpm_missing(\@tex_pkgs, 1) if ($pdf); 498 $map{"latexmk"} = "texlive-collection-basic";
499
500 if ($pdf) {
501 check_missing_file("/usr/share/fonts/google-noto-cjk/NotoSansCJK-Regular.ttc",
502 "google-noto-sans-cjk-ttc-fonts", 2);
503 }
504
505 check_rpm_missing(\@tex_pkgs, 2) if ($pdf);
454 check_missing(\%map); 506 check_missing(\%map);
455 507
456 return if (!$need && !$optional); 508 return if (!$need && !$optional);
@@ -473,7 +525,13 @@ sub give_arch_linux_hints()
473 "texlive-latexextra", 525 "texlive-latexextra",
474 "ttf-dejavu", 526 "ttf-dejavu",
475 ); 527 );
476 check_pacman_missing(\@archlinux_tex_pkgs, 1) if ($pdf); 528 check_pacman_missing(\@archlinux_tex_pkgs, 2) if ($pdf);
529
530 if ($pdf) {
531 check_missing_file("/usr/share/fonts/noto-cjk/NotoSansCJK-Regular.ttc",
532 "noto-fonts-cjk", 2);
533 }
534
477 check_missing(\%map); 535 check_missing(\%map);
478 536
479 return if (!$need && !$optional); 537 return if (!$need && !$optional);
@@ -492,15 +550,31 @@ sub give_gentoo_hints()
492 ); 550 );
493 551
494 check_missing_file("/usr/share/fonts/dejavu/DejaVuSans.ttf", 552 check_missing_file("/usr/share/fonts/dejavu/DejaVuSans.ttf",
495 "media-fonts/dejavu", 1) if ($pdf); 553 "media-fonts/dejavu", 2) if ($pdf);
554
555 if ($pdf) {
556 check_missing_file("/usr/share/fonts/noto-cjk/NotoSansCJKsc-Regular.otf",
557 "media-fonts/noto-cjk", 2);
558 }
496 559
497 check_missing(\%map); 560 check_missing(\%map);
498 561
499 return if (!$need && !$optional); 562 return if (!$need && !$optional);
500 563
501 printf("You should run:\n\n"); 564 printf("You should run:\n\n");
502 printf("\tsudo su -c 'echo \"media-gfx/imagemagick svg png\" > /etc/portage/package.use/imagemagick'\n"); 565
503 printf("\tsudo su -c 'echo \"media-gfx/graphviz cairo pdf\" > /etc/portage/package.use/graphviz'\n"); 566 my $imagemagick = "media-gfx/imagemagick svg png";
567 my $cairo = "media-gfx/graphviz cairo pdf";
568 my $portage_imagemagick = "/etc/portage/package.use/imagemagick";
569 my $portage_cairo = "/etc/portage/package.use/graphviz";
570
571 if (qx(cat $portage_imagemagick) ne "$imagemagick\n") {
572 printf("\tsudo su -c 'echo \"$imagemagick\" > $portage_imagemagick'\n")
573 }
574 if (qx(cat $portage_cairo) ne "$cairo\n") {
575 printf("\tsudo su -c 'echo \"$cairo\" > $portage_cairo'\n");
576 }
577
504 printf("\tsudo emerge --ask $install\n"); 578 printf("\tsudo emerge --ask $install\n");
505 579
506} 580}
@@ -560,7 +634,7 @@ sub check_distros()
560 my %map = ( 634 my %map = (
561 "sphinx-build" => "sphinx" 635 "sphinx-build" => "sphinx"
562 ); 636 );
563 check_missing_tex(1) if ($pdf); 637 check_missing_tex(2) if ($pdf);
564 check_missing(\%map); 638 check_missing(\%map);
565 print "I don't know distro $system_release.\n"; 639 print "I don't know distro $system_release.\n";
566 print "So, I can't provide you a hint with the install procedure.\n"; 640 print "So, I can't provide you a hint with the install procedure.\n";
@@ -589,11 +663,13 @@ sub check_needs()
589 check_program("make", 0); 663 check_program("make", 0);
590 check_program("gcc", 0); 664 check_program("gcc", 0);
591 check_python_module("sphinx_rtd_theme", 1) if (!$virtualenv); 665 check_python_module("sphinx_rtd_theme", 1) if (!$virtualenv);
592 check_program("xelatex", 1) if ($pdf);
593 check_program("dot", 1); 666 check_program("dot", 1);
594 check_program("convert", 1); 667 check_program("convert", 1);
595 check_program("rsvg-convert", 1) if ($pdf); 668
596 check_program("latexmk", 1) if ($pdf); 669 # Extra PDF files - should use 2 for is_optional
670 check_program("xelatex", 2) if ($pdf);
671 check_program("rsvg-convert", 2) if ($pdf);
672 check_program("latexmk", 2) if ($pdf);
597 673
598 check_distros(); 674 check_distros();
599 675
diff --git a/security/Kconfig.hardening b/security/Kconfig.hardening
index a1ffe2eb4d5f..af4c979b38ee 100644
--- a/security/Kconfig.hardening
+++ b/security/Kconfig.hardening
@@ -61,6 +61,7 @@ choice
61 config GCC_PLUGIN_STRUCTLEAK_BYREF 61 config GCC_PLUGIN_STRUCTLEAK_BYREF
62 bool "zero-init structs passed by reference (strong)" 62 bool "zero-init structs passed by reference (strong)"
63 depends on GCC_PLUGINS 63 depends on GCC_PLUGINS
64 depends on !(KASAN && KASAN_STACK=1)
64 select GCC_PLUGIN_STRUCTLEAK 65 select GCC_PLUGIN_STRUCTLEAK
65 help 66 help
66 Zero-initialize any structures on the stack that may 67 Zero-initialize any structures on the stack that may
@@ -70,9 +71,15 @@ choice
70 exposures, like CVE-2017-1000410: 71 exposures, like CVE-2017-1000410:
71 https://git.kernel.org/linus/06e7e776ca4d3654 72 https://git.kernel.org/linus/06e7e776ca4d3654
72 73
74 As a side-effect, this keeps a lot of variables on the
75 stack that can otherwise be optimized out, so combining
76 this with CONFIG_KASAN_STACK can lead to a stack overflow
77 and is disallowed.
78
73 config GCC_PLUGIN_STRUCTLEAK_BYREF_ALL 79 config GCC_PLUGIN_STRUCTLEAK_BYREF_ALL
74 bool "zero-init anything passed by reference (very strong)" 80 bool "zero-init anything passed by reference (very strong)"
75 depends on GCC_PLUGINS 81 depends on GCC_PLUGINS
82 depends on !(KASAN && KASAN_STACK=1)
76 select GCC_PLUGIN_STRUCTLEAK 83 select GCC_PLUGIN_STRUCTLEAK
77 help 84 help
78 Zero-initialize any stack variables that may be passed 85 Zero-initialize any stack variables that may be passed
diff --git a/security/keys/request_key.c b/security/keys/request_key.c
index 7325f382dbf4..957b9e3e1492 100644
--- a/security/keys/request_key.c
+++ b/security/keys/request_key.c
@@ -595,7 +595,7 @@ struct key *request_key_and_link(struct key_type *type,
595 595
596 key = check_cached_key(&ctx); 596 key = check_cached_key(&ctx);
597 if (key) 597 if (key)
598 return key; 598 goto error_free;
599 599
600 /* search all the process keyrings for a key */ 600 /* search all the process keyrings for a key */
601 rcu_read_lock(); 601 rcu_read_lock();
diff --git a/security/keys/trusted.c b/security/keys/trusted.c
index 9a94672e7adc..ade699131065 100644
--- a/security/keys/trusted.c
+++ b/security/keys/trusted.c
@@ -1228,24 +1228,11 @@ hashalg_fail:
1228 1228
1229static int __init init_digests(void) 1229static int __init init_digests(void)
1230{ 1230{
1231 u8 digest[TPM_MAX_DIGEST_SIZE];
1232 int ret;
1233 int i;
1234
1235 ret = tpm_get_random(chip, digest, TPM_MAX_DIGEST_SIZE);
1236 if (ret < 0)
1237 return ret;
1238 if (ret < TPM_MAX_DIGEST_SIZE)
1239 return -EFAULT;
1240
1241 digests = kcalloc(chip->nr_allocated_banks, sizeof(*digests), 1231 digests = kcalloc(chip->nr_allocated_banks, sizeof(*digests),
1242 GFP_KERNEL); 1232 GFP_KERNEL);
1243 if (!digests) 1233 if (!digests)
1244 return -ENOMEM; 1234 return -ENOMEM;
1245 1235
1246 for (i = 0; i < chip->nr_allocated_banks; i++)
1247 memcpy(digests[i].digest, digest, TPM_MAX_DIGEST_SIZE);
1248
1249 return 0; 1236 return 0;
1250} 1237}
1251 1238
diff --git a/security/selinux/ss/policydb.c b/security/selinux/ss/policydb.c
index 624ccc6ac744..f8efaa9f647c 100644
--- a/security/selinux/ss/policydb.c
+++ b/security/selinux/ss/policydb.c
@@ -272,6 +272,8 @@ static int rangetr_cmp(struct hashtab *h, const void *k1, const void *k2)
272 return v; 272 return v;
273} 273}
274 274
275static int (*destroy_f[SYM_NUM]) (void *key, void *datum, void *datap);
276
275/* 277/*
276 * Initialize a policy database structure. 278 * Initialize a policy database structure.
277 */ 279 */
@@ -319,8 +321,10 @@ static int policydb_init(struct policydb *p)
319out: 321out:
320 hashtab_destroy(p->filename_trans); 322 hashtab_destroy(p->filename_trans);
321 hashtab_destroy(p->range_tr); 323 hashtab_destroy(p->range_tr);
322 for (i = 0; i < SYM_NUM; i++) 324 for (i = 0; i < SYM_NUM; i++) {
325 hashtab_map(p->symtab[i].table, destroy_f[i], NULL);
323 hashtab_destroy(p->symtab[i].table); 326 hashtab_destroy(p->symtab[i].table);
327 }
324 return rc; 328 return rc;
325} 329}
326 330
diff --git a/security/selinux/ss/sidtab.c b/security/selinux/ss/sidtab.c
index e63a90ff2728..1f0a6eaa2d6a 100644
--- a/security/selinux/ss/sidtab.c
+++ b/security/selinux/ss/sidtab.c
@@ -286,6 +286,11 @@ static int sidtab_reverse_lookup(struct sidtab *s, struct context *context,
286 ++count; 286 ++count;
287 } 287 }
288 288
289 /* bail out if we already reached max entries */
290 rc = -EOVERFLOW;
291 if (count >= SIDTAB_MAX)
292 goto out_unlock;
293
289 /* insert context into new entry */ 294 /* insert context into new entry */
290 rc = -ENOMEM; 295 rc = -ENOMEM;
291 dst = sidtab_do_lookup(s, count, 1); 296 dst = sidtab_do_lookup(s, count, 1);
diff --git a/sound/ac97/bus.c b/sound/ac97/bus.c
index 7b977b753a03..7985dd8198b6 100644
--- a/sound/ac97/bus.c
+++ b/sound/ac97/bus.c
@@ -122,17 +122,12 @@ static int ac97_codec_add(struct ac97_controller *ac97_ctrl, int idx,
122 vendor_id); 122 vendor_id);
123 123
124 ret = device_add(&codec->dev); 124 ret = device_add(&codec->dev);
125 if (ret) 125 if (ret) {
126 goto err_free_codec; 126 put_device(&codec->dev);
127 return ret;
128 }
127 129
128 return 0; 130 return 0;
129err_free_codec:
130 of_node_put(codec->dev.of_node);
131 put_device(&codec->dev);
132 kfree(codec);
133 ac97_ctrl->codecs[idx] = NULL;
134
135 return ret;
136} 131}
137 132
138unsigned int snd_ac97_bus_scan_one(struct ac97_controller *adrv, 133unsigned int snd_ac97_bus_scan_one(struct ac97_controller *adrv,
diff --git a/sound/core/compress_offload.c b/sound/core/compress_offload.c
index 99b882158705..41905afada63 100644
--- a/sound/core/compress_offload.c
+++ b/sound/core/compress_offload.c
@@ -574,10 +574,7 @@ snd_compr_set_params(struct snd_compr_stream *stream, unsigned long arg)
574 stream->metadata_set = false; 574 stream->metadata_set = false;
575 stream->next_track = false; 575 stream->next_track = false;
576 576
577 if (stream->direction == SND_COMPRESS_PLAYBACK) 577 stream->runtime->state = SNDRV_PCM_STATE_SETUP;
578 stream->runtime->state = SNDRV_PCM_STATE_SETUP;
579 else
580 stream->runtime->state = SNDRV_PCM_STATE_PREPARED;
581 } else { 578 } else {
582 return -EPERM; 579 return -EPERM;
583 } 580 }
@@ -693,8 +690,17 @@ static int snd_compr_start(struct snd_compr_stream *stream)
693{ 690{
694 int retval; 691 int retval;
695 692
696 if (stream->runtime->state != SNDRV_PCM_STATE_PREPARED) 693 switch (stream->runtime->state) {
694 case SNDRV_PCM_STATE_SETUP:
695 if (stream->direction != SND_COMPRESS_CAPTURE)
696 return -EPERM;
697 break;
698 case SNDRV_PCM_STATE_PREPARED:
699 break;
700 default:
697 return -EPERM; 701 return -EPERM;
702 }
703
698 retval = stream->ops->trigger(stream, SNDRV_PCM_TRIGGER_START); 704 retval = stream->ops->trigger(stream, SNDRV_PCM_TRIGGER_START);
699 if (!retval) 705 if (!retval)
700 stream->runtime->state = SNDRV_PCM_STATE_RUNNING; 706 stream->runtime->state = SNDRV_PCM_STATE_RUNNING;
@@ -705,9 +711,15 @@ static int snd_compr_stop(struct snd_compr_stream *stream)
705{ 711{
706 int retval; 712 int retval;
707 713
708 if (stream->runtime->state == SNDRV_PCM_STATE_PREPARED || 714 switch (stream->runtime->state) {
709 stream->runtime->state == SNDRV_PCM_STATE_SETUP) 715 case SNDRV_PCM_STATE_OPEN:
716 case SNDRV_PCM_STATE_SETUP:
717 case SNDRV_PCM_STATE_PREPARED:
710 return -EPERM; 718 return -EPERM;
719 default:
720 break;
721 }
722
711 retval = stream->ops->trigger(stream, SNDRV_PCM_TRIGGER_STOP); 723 retval = stream->ops->trigger(stream, SNDRV_PCM_TRIGGER_STOP);
712 if (!retval) { 724 if (!retval) {
713 snd_compr_drain_notify(stream); 725 snd_compr_drain_notify(stream);
@@ -795,9 +807,17 @@ static int snd_compr_drain(struct snd_compr_stream *stream)
795{ 807{
796 int retval; 808 int retval;
797 809
798 if (stream->runtime->state == SNDRV_PCM_STATE_PREPARED || 810 switch (stream->runtime->state) {
799 stream->runtime->state == SNDRV_PCM_STATE_SETUP) 811 case SNDRV_PCM_STATE_OPEN:
812 case SNDRV_PCM_STATE_SETUP:
813 case SNDRV_PCM_STATE_PREPARED:
814 case SNDRV_PCM_STATE_PAUSED:
800 return -EPERM; 815 return -EPERM;
816 case SNDRV_PCM_STATE_XRUN:
817 return -EPIPE;
818 default:
819 break;
820 }
801 821
802 retval = stream->ops->trigger(stream, SND_COMPR_TRIGGER_DRAIN); 822 retval = stream->ops->trigger(stream, SND_COMPR_TRIGGER_DRAIN);
803 if (retval) { 823 if (retval) {
@@ -817,6 +837,10 @@ static int snd_compr_next_track(struct snd_compr_stream *stream)
817 if (stream->runtime->state != SNDRV_PCM_STATE_RUNNING) 837 if (stream->runtime->state != SNDRV_PCM_STATE_RUNNING)
818 return -EPERM; 838 return -EPERM;
819 839
840 /* next track doesn't have any meaning for capture streams */
841 if (stream->direction == SND_COMPRESS_CAPTURE)
842 return -EPERM;
843
820 /* you can signal next track if this is intended to be a gapless stream 844 /* you can signal next track if this is intended to be a gapless stream
821 * and current track metadata is set 845 * and current track metadata is set
822 */ 846 */
@@ -834,9 +858,23 @@ static int snd_compr_next_track(struct snd_compr_stream *stream)
834static int snd_compr_partial_drain(struct snd_compr_stream *stream) 858static int snd_compr_partial_drain(struct snd_compr_stream *stream)
835{ 859{
836 int retval; 860 int retval;
837 if (stream->runtime->state == SNDRV_PCM_STATE_PREPARED || 861
838 stream->runtime->state == SNDRV_PCM_STATE_SETUP) 862 switch (stream->runtime->state) {
863 case SNDRV_PCM_STATE_OPEN:
864 case SNDRV_PCM_STATE_SETUP:
865 case SNDRV_PCM_STATE_PREPARED:
866 case SNDRV_PCM_STATE_PAUSED:
867 return -EPERM;
868 case SNDRV_PCM_STATE_XRUN:
869 return -EPIPE;
870 default:
871 break;
872 }
873
874 /* partial drain doesn't have any meaning for capture streams */
875 if (stream->direction == SND_COMPRESS_CAPTURE)
839 return -EPERM; 876 return -EPERM;
877
840 /* stream can be drained only when next track has been signalled */ 878 /* stream can be drained only when next track has been signalled */
841 if (stream->next_track == false) 879 if (stream->next_track == false)
842 return -EPERM; 880 return -EPERM;
diff --git a/sound/core/pcm_native.c b/sound/core/pcm_native.c
index 860543a4c840..703857aab00f 100644
--- a/sound/core/pcm_native.c
+++ b/sound/core/pcm_native.c
@@ -77,7 +77,7 @@ void snd_pcm_group_init(struct snd_pcm_group *group)
77 spin_lock_init(&group->lock); 77 spin_lock_init(&group->lock);
78 mutex_init(&group->mutex); 78 mutex_init(&group->mutex);
79 INIT_LIST_HEAD(&group->substreams); 79 INIT_LIST_HEAD(&group->substreams);
80 refcount_set(&group->refs, 0); 80 refcount_set(&group->refs, 1);
81} 81}
82 82
83/* define group lock helpers */ 83/* define group lock helpers */
@@ -1096,8 +1096,7 @@ static void snd_pcm_group_unref(struct snd_pcm_group *group,
1096 1096
1097 if (!group) 1097 if (!group)
1098 return; 1098 return;
1099 do_free = refcount_dec_and_test(&group->refs) && 1099 do_free = refcount_dec_and_test(&group->refs);
1100 list_empty(&group->substreams);
1101 snd_pcm_group_unlock(group, substream->pcm->nonatomic); 1100 snd_pcm_group_unlock(group, substream->pcm->nonatomic);
1102 if (do_free) 1101 if (do_free)
1103 kfree(group); 1102 kfree(group);
@@ -1874,6 +1873,7 @@ static int snd_pcm_drain(struct snd_pcm_substream *substream,
1874 if (!to_check) 1873 if (!to_check)
1875 break; /* all drained */ 1874 break; /* all drained */
1876 init_waitqueue_entry(&wait, current); 1875 init_waitqueue_entry(&wait, current);
1876 set_current_state(TASK_INTERRUPTIBLE);
1877 add_wait_queue(&to_check->sleep, &wait); 1877 add_wait_queue(&to_check->sleep, &wait);
1878 snd_pcm_stream_unlock_irq(substream); 1878 snd_pcm_stream_unlock_irq(substream);
1879 if (runtime->no_period_wakeup) 1879 if (runtime->no_period_wakeup)
@@ -1886,7 +1886,7 @@ static int snd_pcm_drain(struct snd_pcm_substream *substream,
1886 } 1886 }
1887 tout = msecs_to_jiffies(tout * 1000); 1887 tout = msecs_to_jiffies(tout * 1000);
1888 } 1888 }
1889 tout = schedule_timeout_interruptible(tout); 1889 tout = schedule_timeout(tout);
1890 1890
1891 snd_pcm_stream_lock_irq(substream); 1891 snd_pcm_stream_lock_irq(substream);
1892 group = snd_pcm_stream_group_ref(substream); 1892 group = snd_pcm_stream_group_ref(substream);
@@ -2020,6 +2020,7 @@ static int snd_pcm_link(struct snd_pcm_substream *substream, int fd)
2020 snd_pcm_group_lock_irq(target_group, nonatomic); 2020 snd_pcm_group_lock_irq(target_group, nonatomic);
2021 snd_pcm_stream_lock(substream1); 2021 snd_pcm_stream_lock(substream1);
2022 snd_pcm_group_assign(substream1, target_group); 2022 snd_pcm_group_assign(substream1, target_group);
2023 refcount_inc(&target_group->refs);
2023 snd_pcm_stream_unlock(substream1); 2024 snd_pcm_stream_unlock(substream1);
2024 snd_pcm_group_unlock_irq(target_group, nonatomic); 2025 snd_pcm_group_unlock_irq(target_group, nonatomic);
2025 _end: 2026 _end:
@@ -2056,13 +2057,14 @@ static int snd_pcm_unlink(struct snd_pcm_substream *substream)
2056 snd_pcm_group_lock_irq(group, nonatomic); 2057 snd_pcm_group_lock_irq(group, nonatomic);
2057 2058
2058 relink_to_local(substream); 2059 relink_to_local(substream);
2060 refcount_dec(&group->refs);
2059 2061
2060 /* detach the last stream, too */ 2062 /* detach the last stream, too */
2061 if (list_is_singular(&group->substreams)) { 2063 if (list_is_singular(&group->substreams)) {
2062 relink_to_local(list_first_entry(&group->substreams, 2064 relink_to_local(list_first_entry(&group->substreams,
2063 struct snd_pcm_substream, 2065 struct snd_pcm_substream,
2064 link_list)); 2066 link_list));
2065 do_free = !refcount_read(&group->refs); 2067 do_free = refcount_dec_and_test(&group->refs);
2066 } 2068 }
2067 2069
2068 snd_pcm_group_unlock_irq(group, nonatomic); 2070 snd_pcm_group_unlock_irq(group, nonatomic);
diff --git a/sound/core/seq/seq_clientmgr.c b/sound/core/seq/seq_clientmgr.c
index 7737b2670064..6d9592f0ae1d 100644
--- a/sound/core/seq/seq_clientmgr.c
+++ b/sound/core/seq/seq_clientmgr.c
@@ -1835,8 +1835,7 @@ static int snd_seq_ioctl_get_client_pool(struct snd_seq_client *client,
1835 if (cptr->type == USER_CLIENT) { 1835 if (cptr->type == USER_CLIENT) {
1836 info->input_pool = cptr->data.user.fifo_pool_size; 1836 info->input_pool = cptr->data.user.fifo_pool_size;
1837 info->input_free = info->input_pool; 1837 info->input_free = info->input_pool;
1838 if (cptr->data.user.fifo) 1838 info->input_free = snd_seq_fifo_unused_cells(cptr->data.user.fifo);
1839 info->input_free = snd_seq_unused_cells(cptr->data.user.fifo->pool);
1840 } else { 1839 } else {
1841 info->input_pool = 0; 1840 info->input_pool = 0;
1842 info->input_free = 0; 1841 info->input_free = 0;
diff --git a/sound/core/seq/seq_fifo.c b/sound/core/seq/seq_fifo.c
index ea69261f269a..eaaa8b5830bb 100644
--- a/sound/core/seq/seq_fifo.c
+++ b/sound/core/seq/seq_fifo.c
@@ -263,3 +263,20 @@ int snd_seq_fifo_resize(struct snd_seq_fifo *f, int poolsize)
263 263
264 return 0; 264 return 0;
265} 265}
266
267/* get the number of unused cells safely */
268int snd_seq_fifo_unused_cells(struct snd_seq_fifo *f)
269{
270 unsigned long flags;
271 int cells;
272
273 if (!f)
274 return 0;
275
276 snd_use_lock_use(&f->use_lock);
277 spin_lock_irqsave(&f->lock, flags);
278 cells = snd_seq_unused_cells(f->pool);
279 spin_unlock_irqrestore(&f->lock, flags);
280 snd_use_lock_free(&f->use_lock);
281 return cells;
282}
diff --git a/sound/core/seq/seq_fifo.h b/sound/core/seq/seq_fifo.h
index edc68743943d..b56a7b897c9c 100644
--- a/sound/core/seq/seq_fifo.h
+++ b/sound/core/seq/seq_fifo.h
@@ -53,5 +53,7 @@ int snd_seq_fifo_poll_wait(struct snd_seq_fifo *f, struct file *file, poll_table
53/* resize pool in fifo */ 53/* resize pool in fifo */
54int snd_seq_fifo_resize(struct snd_seq_fifo *f, int poolsize); 54int snd_seq_fifo_resize(struct snd_seq_fifo *f, int poolsize);
55 55
56/* get the number of unused cells safely */
57int snd_seq_fifo_unused_cells(struct snd_seq_fifo *f);
56 58
57#endif 59#endif
diff --git a/sound/firewire/oxfw/oxfw-pcm.c b/sound/firewire/oxfw/oxfw-pcm.c
index 9ea39348cdf5..7c6d1c277d4d 100644
--- a/sound/firewire/oxfw/oxfw-pcm.c
+++ b/sound/firewire/oxfw/oxfw-pcm.c
@@ -248,7 +248,7 @@ static int pcm_playback_hw_params(struct snd_pcm_substream *substream,
248 unsigned int channels = params_channels(hw_params); 248 unsigned int channels = params_channels(hw_params);
249 249
250 mutex_lock(&oxfw->mutex); 250 mutex_lock(&oxfw->mutex);
251 err = snd_oxfw_stream_reserve_duplex(oxfw, &oxfw->tx_stream, 251 err = snd_oxfw_stream_reserve_duplex(oxfw, &oxfw->rx_stream,
252 rate, channels); 252 rate, channels);
253 if (err >= 0) 253 if (err >= 0)
254 ++oxfw->substreams_count; 254 ++oxfw->substreams_count;
diff --git a/sound/firewire/packets-buffer.c b/sound/firewire/packets-buffer.c
index 0d35359d25cd..0ecafd0c6722 100644
--- a/sound/firewire/packets-buffer.c
+++ b/sound/firewire/packets-buffer.c
@@ -37,7 +37,7 @@ int iso_packets_buffer_init(struct iso_packets_buffer *b, struct fw_unit *unit,
37 packets_per_page = PAGE_SIZE / packet_size; 37 packets_per_page = PAGE_SIZE / packet_size;
38 if (WARN_ON(!packets_per_page)) { 38 if (WARN_ON(!packets_per_page)) {
39 err = -EINVAL; 39 err = -EINVAL;
40 goto error; 40 goto err_packets;
41 } 41 }
42 pages = DIV_ROUND_UP(count, packets_per_page); 42 pages = DIV_ROUND_UP(count, packets_per_page);
43 43
diff --git a/sound/hda/hdac_i915.c b/sound/hda/hdac_i915.c
index 1192c7561d62..3c2db3816029 100644
--- a/sound/hda/hdac_i915.c
+++ b/sound/hda/hdac_i915.c
@@ -136,10 +136,12 @@ int snd_hdac_i915_init(struct hdac_bus *bus)
136 if (!acomp) 136 if (!acomp)
137 return -ENODEV; 137 return -ENODEV;
138 if (!acomp->ops) { 138 if (!acomp->ops) {
139 request_module("i915"); 139 if (!IS_ENABLED(CONFIG_MODULES) ||
140 /* 60s timeout */ 140 !request_module("i915")) {
141 wait_for_completion_timeout(&bind_complete, 141 /* 60s timeout */
142 msecs_to_jiffies(60 * 1000)); 142 wait_for_completion_timeout(&bind_complete,
143 msecs_to_jiffies(60 * 1000));
144 }
143 } 145 }
144 if (!acomp->ops) { 146 if (!acomp->ops) {
145 dev_info(bus->dev, "couldn't bind with audio component\n"); 147 dev_info(bus->dev, "couldn't bind with audio component\n");
diff --git a/sound/pci/hda/hda_codec.c b/sound/pci/hda/hda_codec.c
index e30e86ca6b72..51f10ed9bc43 100644
--- a/sound/pci/hda/hda_codec.c
+++ b/sound/pci/hda/hda_codec.c
@@ -2942,7 +2942,7 @@ static int hda_codec_runtime_resume(struct device *dev)
2942static int hda_codec_force_resume(struct device *dev) 2942static int hda_codec_force_resume(struct device *dev)
2943{ 2943{
2944 struct hda_codec *codec = dev_to_hda_codec(dev); 2944 struct hda_codec *codec = dev_to_hda_codec(dev);
2945 bool forced_resume = !codec->relaxed_resume; 2945 bool forced_resume = !codec->relaxed_resume && codec->jacktbl.used;
2946 int ret; 2946 int ret;
2947 2947
2948 /* The get/put pair below enforces the runtime resume even if the 2948 /* The get/put pair below enforces the runtime resume even if the
diff --git a/sound/pci/hda/hda_controller.c b/sound/pci/hda/hda_controller.c
index c8d1b4316245..48d863736b3c 100644
--- a/sound/pci/hda/hda_controller.c
+++ b/sound/pci/hda/hda_controller.c
@@ -598,11 +598,9 @@ static int azx_pcm_open(struct snd_pcm_substream *substream)
598 } 598 }
599 runtime->private_data = azx_dev; 599 runtime->private_data = azx_dev;
600 600
601 if (chip->gts_present)
602 azx_pcm_hw.info = azx_pcm_hw.info |
603 SNDRV_PCM_INFO_HAS_LINK_SYNCHRONIZED_ATIME;
604
605 runtime->hw = azx_pcm_hw; 601 runtime->hw = azx_pcm_hw;
602 if (chip->gts_present)
603 runtime->hw.info |= SNDRV_PCM_INFO_HAS_LINK_SYNCHRONIZED_ATIME;
606 runtime->hw.channels_min = hinfo->channels_min; 604 runtime->hw.channels_min = hinfo->channels_min;
607 runtime->hw.channels_max = hinfo->channels_max; 605 runtime->hw.channels_max = hinfo->channels_max;
608 runtime->hw.formats = hinfo->formats; 606 runtime->hw.formats = hinfo->formats;
@@ -615,6 +613,13 @@ static int azx_pcm_open(struct snd_pcm_substream *substream)
615 20, 613 20,
616 178000000); 614 178000000);
617 615
616 /* by some reason, the playback stream stalls on PulseAudio with
617 * tsched=1 when a capture stream triggers. Until we figure out the
618 * real cause, disable tsched mode by telling the PCM info flag.
619 */
620 if (chip->driver_caps & AZX_DCAPS_AMD_WORKAROUND)
621 runtime->hw.info |= SNDRV_PCM_INFO_BATCH;
622
618 if (chip->align_buffer_size) 623 if (chip->align_buffer_size)
619 /* constrain buffer sizes to be multiple of 128 624 /* constrain buffer sizes to be multiple of 128
620 bytes. This is more efficient in terms of memory 625 bytes. This is more efficient in terms of memory
diff --git a/sound/pci/hda/hda_controller.h b/sound/pci/hda/hda_controller.h
index baa15374fbcb..f2a6df5e6bcb 100644
--- a/sound/pci/hda/hda_controller.h
+++ b/sound/pci/hda/hda_controller.h
@@ -31,7 +31,7 @@
31/* 14 unused */ 31/* 14 unused */
32#define AZX_DCAPS_CTX_WORKAROUND (1 << 15) /* X-Fi workaround */ 32#define AZX_DCAPS_CTX_WORKAROUND (1 << 15) /* X-Fi workaround */
33#define AZX_DCAPS_POSFIX_LPIB (1 << 16) /* Use LPIB as default */ 33#define AZX_DCAPS_POSFIX_LPIB (1 << 16) /* Use LPIB as default */
34/* 17 unused */ 34#define AZX_DCAPS_AMD_WORKAROUND (1 << 17) /* AMD-specific workaround */
35#define AZX_DCAPS_NO_64BIT (1 << 18) /* No 64bit address */ 35#define AZX_DCAPS_NO_64BIT (1 << 18) /* No 64bit address */
36#define AZX_DCAPS_SYNC_WRITE (1 << 19) /* sync each cmd write */ 36#define AZX_DCAPS_SYNC_WRITE (1 << 19) /* sync each cmd write */
37#define AZX_DCAPS_OLD_SSYNC (1 << 20) /* Old SSYNC reg for ICH */ 37#define AZX_DCAPS_OLD_SSYNC (1 << 20) /* Old SSYNC reg for ICH */
diff --git a/sound/pci/hda/hda_generic.c b/sound/pci/hda/hda_generic.c
index 485edaba0037..5bf24fb819d2 100644
--- a/sound/pci/hda/hda_generic.c
+++ b/sound/pci/hda/hda_generic.c
@@ -6051,6 +6051,24 @@ void snd_hda_gen_free(struct hda_codec *codec)
6051} 6051}
6052EXPORT_SYMBOL_GPL(snd_hda_gen_free); 6052EXPORT_SYMBOL_GPL(snd_hda_gen_free);
6053 6053
6054/**
6055 * snd_hda_gen_reboot_notify - Make codec enter D3 before rebooting
6056 * @codec: the HDA codec
6057 *
6058 * This can be put as patch_ops reboot_notify function.
6059 */
6060void snd_hda_gen_reboot_notify(struct hda_codec *codec)
6061{
6062 /* Make the codec enter D3 to avoid spurious noises from the internal
6063 * speaker during (and after) reboot
6064 */
6065 snd_hda_codec_set_power_to_all(codec, codec->core.afg, AC_PWRST_D3);
6066 snd_hda_codec_write(codec, codec->core.afg, 0,
6067 AC_VERB_SET_POWER_STATE, AC_PWRST_D3);
6068 msleep(10);
6069}
6070EXPORT_SYMBOL_GPL(snd_hda_gen_reboot_notify);
6071
6054#ifdef CONFIG_PM 6072#ifdef CONFIG_PM
6055/** 6073/**
6056 * snd_hda_gen_check_power_status - check the loopback power save state 6074 * snd_hda_gen_check_power_status - check the loopback power save state
@@ -6078,6 +6096,7 @@ static const struct hda_codec_ops generic_patch_ops = {
6078 .init = snd_hda_gen_init, 6096 .init = snd_hda_gen_init,
6079 .free = snd_hda_gen_free, 6097 .free = snd_hda_gen_free,
6080 .unsol_event = snd_hda_jack_unsol_event, 6098 .unsol_event = snd_hda_jack_unsol_event,
6099 .reboot_notify = snd_hda_gen_reboot_notify,
6081#ifdef CONFIG_PM 6100#ifdef CONFIG_PM
6082 .check_power_status = snd_hda_gen_check_power_status, 6101 .check_power_status = snd_hda_gen_check_power_status,
6083#endif 6102#endif
@@ -6100,7 +6119,7 @@ static int snd_hda_parse_generic_codec(struct hda_codec *codec)
6100 6119
6101 err = snd_hda_parse_pin_defcfg(codec, &spec->autocfg, NULL, 0); 6120 err = snd_hda_parse_pin_defcfg(codec, &spec->autocfg, NULL, 0);
6102 if (err < 0) 6121 if (err < 0)
6103 return err; 6122 goto error;
6104 6123
6105 err = snd_hda_gen_parse_auto_config(codec, &spec->autocfg); 6124 err = snd_hda_gen_parse_auto_config(codec, &spec->autocfg);
6106 if (err < 0) 6125 if (err < 0)
diff --git a/sound/pci/hda/hda_generic.h b/sound/pci/hda/hda_generic.h
index 35a670a71c42..5f199dcb0d18 100644
--- a/sound/pci/hda/hda_generic.h
+++ b/sound/pci/hda/hda_generic.h
@@ -332,6 +332,7 @@ int snd_hda_gen_parse_auto_config(struct hda_codec *codec,
332 struct auto_pin_cfg *cfg); 332 struct auto_pin_cfg *cfg);
333int snd_hda_gen_build_controls(struct hda_codec *codec); 333int snd_hda_gen_build_controls(struct hda_codec *codec);
334int snd_hda_gen_build_pcms(struct hda_codec *codec); 334int snd_hda_gen_build_pcms(struct hda_codec *codec);
335void snd_hda_gen_reboot_notify(struct hda_codec *codec);
335 336
336/* standard jack event callbacks */ 337/* standard jack event callbacks */
337void snd_hda_gen_hp_automute(struct hda_codec *codec, 338void snd_hda_gen_hp_automute(struct hda_codec *codec,
diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
index cb8b0945547c..99fc0917339b 100644
--- a/sound/pci/hda/hda_intel.c
+++ b/sound/pci/hda/hda_intel.c
@@ -64,6 +64,7 @@ enum {
64 POS_FIX_VIACOMBO, 64 POS_FIX_VIACOMBO,
65 POS_FIX_COMBO, 65 POS_FIX_COMBO,
66 POS_FIX_SKL, 66 POS_FIX_SKL,
67 POS_FIX_FIFO,
67}; 68};
68 69
69/* Defines for ATI HD Audio support in SB450 south bridge */ 70/* Defines for ATI HD Audio support in SB450 south bridge */
@@ -135,7 +136,7 @@ module_param_array(model, charp, NULL, 0444);
135MODULE_PARM_DESC(model, "Use the given board model."); 136MODULE_PARM_DESC(model, "Use the given board model.");
136module_param_array(position_fix, int, NULL, 0444); 137module_param_array(position_fix, int, NULL, 0444);
137MODULE_PARM_DESC(position_fix, "DMA pointer read method." 138MODULE_PARM_DESC(position_fix, "DMA pointer read method."
138 "(-1 = system default, 0 = auto, 1 = LPIB, 2 = POSBUF, 3 = VIACOMBO, 4 = COMBO, 5 = SKL+)."); 139 "(-1 = system default, 0 = auto, 1 = LPIB, 2 = POSBUF, 3 = VIACOMBO, 4 = COMBO, 5 = SKL+, 6 = FIFO).");
139module_param_array(bdl_pos_adj, int, NULL, 0644); 140module_param_array(bdl_pos_adj, int, NULL, 0644);
140MODULE_PARM_DESC(bdl_pos_adj, "BDL position adjustment offset."); 141MODULE_PARM_DESC(bdl_pos_adj, "BDL position adjustment offset.");
141module_param_array(probe_mask, int, NULL, 0444); 142module_param_array(probe_mask, int, NULL, 0444);
@@ -313,11 +314,10 @@ enum {
313 314
314#define AZX_DCAPS_INTEL_SKYLAKE \ 315#define AZX_DCAPS_INTEL_SKYLAKE \
315 (AZX_DCAPS_INTEL_PCH_BASE | AZX_DCAPS_PM_RUNTIME |\ 316 (AZX_DCAPS_INTEL_PCH_BASE | AZX_DCAPS_PM_RUNTIME |\
317 AZX_DCAPS_SYNC_WRITE |\
316 AZX_DCAPS_SEPARATE_STREAM_TAG | AZX_DCAPS_I915_COMPONENT) 318 AZX_DCAPS_SEPARATE_STREAM_TAG | AZX_DCAPS_I915_COMPONENT)
317 319
318#define AZX_DCAPS_INTEL_BROXTON \ 320#define AZX_DCAPS_INTEL_BROXTON AZX_DCAPS_INTEL_SKYLAKE
319 (AZX_DCAPS_INTEL_PCH_BASE | AZX_DCAPS_PM_RUNTIME |\
320 AZX_DCAPS_SEPARATE_STREAM_TAG | AZX_DCAPS_I915_COMPONENT)
321 321
322/* quirks for ATI SB / AMD Hudson */ 322/* quirks for ATI SB / AMD Hudson */
323#define AZX_DCAPS_PRESET_ATI_SB \ 323#define AZX_DCAPS_PRESET_ATI_SB \
@@ -333,6 +333,11 @@ enum {
333#define AZX_DCAPS_PRESET_ATI_HDMI_NS \ 333#define AZX_DCAPS_PRESET_ATI_HDMI_NS \
334 (AZX_DCAPS_PRESET_ATI_HDMI | AZX_DCAPS_SNOOP_OFF) 334 (AZX_DCAPS_PRESET_ATI_HDMI | AZX_DCAPS_SNOOP_OFF)
335 335
336/* quirks for AMD SB */
337#define AZX_DCAPS_PRESET_AMD_SB \
338 (AZX_DCAPS_NO_TCSEL | AZX_DCAPS_SYNC_WRITE | AZX_DCAPS_AMD_WORKAROUND |\
339 AZX_DCAPS_SNOOP_TYPE(ATI) | AZX_DCAPS_PM_RUNTIME)
340
336/* quirks for Nvidia */ 341/* quirks for Nvidia */
337#define AZX_DCAPS_PRESET_NVIDIA \ 342#define AZX_DCAPS_PRESET_NVIDIA \
338 (AZX_DCAPS_NO_MSI | AZX_DCAPS_CORBRP_SELF_CLEAR |\ 343 (AZX_DCAPS_NO_MSI | AZX_DCAPS_CORBRP_SELF_CLEAR |\
@@ -842,6 +847,49 @@ static unsigned int azx_via_get_position(struct azx *chip,
842 return bound_pos + mod_dma_pos; 847 return bound_pos + mod_dma_pos;
843} 848}
844 849
850#define AMD_FIFO_SIZE 32
851
852/* get the current DMA position with FIFO size correction */
853static unsigned int azx_get_pos_fifo(struct azx *chip, struct azx_dev *azx_dev)
854{
855 struct snd_pcm_substream *substream = azx_dev->core.substream;
856 struct snd_pcm_runtime *runtime = substream->runtime;
857 unsigned int pos, delay;
858
859 pos = snd_hdac_stream_get_pos_lpib(azx_stream(azx_dev));
860 if (!runtime)
861 return pos;
862
863 runtime->delay = AMD_FIFO_SIZE;
864 delay = frames_to_bytes(runtime, AMD_FIFO_SIZE);
865 if (azx_dev->insufficient) {
866 if (pos < delay) {
867 delay = pos;
868 runtime->delay = bytes_to_frames(runtime, pos);
869 } else {
870 azx_dev->insufficient = 0;
871 }
872 }
873
874 /* correct the DMA position for capture stream */
875 if (substream->stream == SNDRV_PCM_STREAM_CAPTURE) {
876 if (pos < delay)
877 pos += azx_dev->core.bufsize;
878 pos -= delay;
879 }
880
881 return pos;
882}
883
884static int azx_get_delay_from_fifo(struct azx *chip, struct azx_dev *azx_dev,
885 unsigned int pos)
886{
887 struct snd_pcm_substream *substream = azx_dev->core.substream;
888
889 /* just read back the calculated value in the above */
890 return substream->runtime->delay;
891}
892
845static unsigned int azx_skl_get_dpib_pos(struct azx *chip, 893static unsigned int azx_skl_get_dpib_pos(struct azx *chip,
846 struct azx_dev *azx_dev) 894 struct azx_dev *azx_dev)
847{ 895{
@@ -1418,6 +1466,7 @@ static int check_position_fix(struct azx *chip, int fix)
1418 case POS_FIX_VIACOMBO: 1466 case POS_FIX_VIACOMBO:
1419 case POS_FIX_COMBO: 1467 case POS_FIX_COMBO:
1420 case POS_FIX_SKL: 1468 case POS_FIX_SKL:
1469 case POS_FIX_FIFO:
1421 return fix; 1470 return fix;
1422 } 1471 }
1423 1472
@@ -1434,6 +1483,10 @@ static int check_position_fix(struct azx *chip, int fix)
1434 dev_dbg(chip->card->dev, "Using VIACOMBO position fix\n"); 1483 dev_dbg(chip->card->dev, "Using VIACOMBO position fix\n");
1435 return POS_FIX_VIACOMBO; 1484 return POS_FIX_VIACOMBO;
1436 } 1485 }
1486 if (chip->driver_caps & AZX_DCAPS_AMD_WORKAROUND) {
1487 dev_dbg(chip->card->dev, "Using FIFO position fix\n");
1488 return POS_FIX_FIFO;
1489 }
1437 if (chip->driver_caps & AZX_DCAPS_POSFIX_LPIB) { 1490 if (chip->driver_caps & AZX_DCAPS_POSFIX_LPIB) {
1438 dev_dbg(chip->card->dev, "Using LPIB position fix\n"); 1491 dev_dbg(chip->card->dev, "Using LPIB position fix\n");
1439 return POS_FIX_LPIB; 1492 return POS_FIX_LPIB;
@@ -1454,6 +1507,7 @@ static void assign_position_fix(struct azx *chip, int fix)
1454 [POS_FIX_VIACOMBO] = azx_via_get_position, 1507 [POS_FIX_VIACOMBO] = azx_via_get_position,
1455 [POS_FIX_COMBO] = azx_get_pos_lpib, 1508 [POS_FIX_COMBO] = azx_get_pos_lpib,
1456 [POS_FIX_SKL] = azx_get_pos_skl, 1509 [POS_FIX_SKL] = azx_get_pos_skl,
1510 [POS_FIX_FIFO] = azx_get_pos_fifo,
1457 }; 1511 };
1458 1512
1459 chip->get_position[0] = chip->get_position[1] = callbacks[fix]; 1513 chip->get_position[0] = chip->get_position[1] = callbacks[fix];
@@ -1468,6 +1522,9 @@ static void assign_position_fix(struct azx *chip, int fix)
1468 azx_get_delay_from_lpib; 1522 azx_get_delay_from_lpib;
1469 } 1523 }
1470 1524
1525 if (fix == POS_FIX_FIFO)
1526 chip->get_delay[0] = chip->get_delay[1] =
1527 azx_get_delay_from_fifo;
1471} 1528}
1472 1529
1473/* 1530/*
@@ -2448,6 +2505,12 @@ static const struct pci_device_id azx_ids[] = {
2448 /* AMD Hudson */ 2505 /* AMD Hudson */
2449 { PCI_DEVICE(0x1022, 0x780d), 2506 { PCI_DEVICE(0x1022, 0x780d),
2450 .driver_data = AZX_DRIVER_GENERIC | AZX_DCAPS_PRESET_ATI_SB }, 2507 .driver_data = AZX_DRIVER_GENERIC | AZX_DCAPS_PRESET_ATI_SB },
2508 /* AMD, X370 & co */
2509 { PCI_DEVICE(0x1022, 0x1457),
2510 .driver_data = AZX_DRIVER_GENERIC | AZX_DCAPS_PRESET_AMD_SB },
2511 /* AMD, X570 & co */
2512 { PCI_DEVICE(0x1022, 0x1487),
2513 .driver_data = AZX_DRIVER_GENERIC | AZX_DCAPS_PRESET_AMD_SB },
2451 /* AMD Stoney */ 2514 /* AMD Stoney */
2452 { PCI_DEVICE(0x1022, 0x157a), 2515 { PCI_DEVICE(0x1022, 0x157a),
2453 .driver_data = AZX_DRIVER_GENERIC | AZX_DCAPS_PRESET_ATI_SB | 2516 .driver_data = AZX_DRIVER_GENERIC | AZX_DCAPS_PRESET_ATI_SB |
diff --git a/sound/pci/hda/patch_ca0132.c b/sound/pci/hda/patch_ca0132.c
index 0d51823d7270..6d1fb7c11f17 100644
--- a/sound/pci/hda/patch_ca0132.c
+++ b/sound/pci/hda/patch_ca0132.c
@@ -1175,6 +1175,7 @@ static const struct snd_pci_quirk ca0132_quirks[] = {
1175 SND_PCI_QUIRK(0x1028, 0x0708, "Alienware 15 R2 2016", QUIRK_ALIENWARE), 1175 SND_PCI_QUIRK(0x1028, 0x0708, "Alienware 15 R2 2016", QUIRK_ALIENWARE),
1176 SND_PCI_QUIRK(0x1102, 0x0010, "Sound Blaster Z", QUIRK_SBZ), 1176 SND_PCI_QUIRK(0x1102, 0x0010, "Sound Blaster Z", QUIRK_SBZ),
1177 SND_PCI_QUIRK(0x1102, 0x0023, "Sound Blaster Z", QUIRK_SBZ), 1177 SND_PCI_QUIRK(0x1102, 0x0023, "Sound Blaster Z", QUIRK_SBZ),
1178 SND_PCI_QUIRK(0x1102, 0x0027, "Sound Blaster Z", QUIRK_SBZ),
1178 SND_PCI_QUIRK(0x1102, 0x0033, "Sound Blaster ZxR", QUIRK_SBZ), 1179 SND_PCI_QUIRK(0x1102, 0x0033, "Sound Blaster ZxR", QUIRK_SBZ),
1179 SND_PCI_QUIRK(0x1458, 0xA016, "Recon3Di", QUIRK_R3DI), 1180 SND_PCI_QUIRK(0x1458, 0xA016, "Recon3Di", QUIRK_R3DI),
1180 SND_PCI_QUIRK(0x1458, 0xA026, "Gigabyte G1.Sniper Z97", QUIRK_R3DI), 1181 SND_PCI_QUIRK(0x1458, 0xA026, "Gigabyte G1.Sniper Z97", QUIRK_R3DI),
diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c
index 4f8d0845ee1e..968d3caab6ac 100644
--- a/sound/pci/hda/patch_conexant.c
+++ b/sound/pci/hda/patch_conexant.c
@@ -163,23 +163,10 @@ static void cx_auto_reboot_notify(struct hda_codec *codec)
163{ 163{
164 struct conexant_spec *spec = codec->spec; 164 struct conexant_spec *spec = codec->spec;
165 165
166 switch (codec->core.vendor_id) {
167 case 0x14f12008: /* CX8200 */
168 case 0x14f150f2: /* CX20722 */
169 case 0x14f150f4: /* CX20724 */
170 break;
171 default:
172 return;
173 }
174
175 /* Turn the problematic codec into D3 to avoid spurious noises 166 /* Turn the problematic codec into D3 to avoid spurious noises
176 from the internal speaker during (and after) reboot */ 167 from the internal speaker during (and after) reboot */
177 cx_auto_turn_eapd(codec, spec->num_eapds, spec->eapds, false); 168 cx_auto_turn_eapd(codec, spec->num_eapds, spec->eapds, false);
178 169 snd_hda_gen_reboot_notify(codec);
179 snd_hda_codec_set_power_to_all(codec, codec->core.afg, AC_PWRST_D3);
180 snd_hda_codec_write(codec, codec->core.afg, 0,
181 AC_VERB_SET_POWER_STATE, AC_PWRST_D3);
182 msleep(10);
183} 170}
184 171
185static void cx_auto_free(struct hda_codec *codec) 172static void cx_auto_free(struct hda_codec *codec)
@@ -624,18 +611,20 @@ static void cxt_fixup_hp_gate_mic_jack(struct hda_codec *codec,
624 611
625/* update LED status via GPIO */ 612/* update LED status via GPIO */
626static void cxt_update_gpio_led(struct hda_codec *codec, unsigned int mask, 613static void cxt_update_gpio_led(struct hda_codec *codec, unsigned int mask,
627 bool enabled) 614 bool led_on)
628{ 615{
629 struct conexant_spec *spec = codec->spec; 616 struct conexant_spec *spec = codec->spec;
630 unsigned int oldval = spec->gpio_led; 617 unsigned int oldval = spec->gpio_led;
631 618
632 if (spec->mute_led_polarity) 619 if (spec->mute_led_polarity)
633 enabled = !enabled; 620 led_on = !led_on;
634 621
635 if (enabled) 622 if (led_on)
636 spec->gpio_led &= ~mask;
637 else
638 spec->gpio_led |= mask; 623 spec->gpio_led |= mask;
624 else
625 spec->gpio_led &= ~mask;
626 codec_dbg(codec, "mask:%d enabled:%d gpio_led:%d\n",
627 mask, led_on, spec->gpio_led);
639 if (spec->gpio_led != oldval) 628 if (spec->gpio_led != oldval)
640 snd_hda_codec_write(codec, 0x01, 0, AC_VERB_SET_GPIO_DATA, 629 snd_hda_codec_write(codec, 0x01, 0, AC_VERB_SET_GPIO_DATA,
641 spec->gpio_led); 630 spec->gpio_led);
@@ -646,8 +635,8 @@ static void cxt_fixup_gpio_mute_hook(void *private_data, int enabled)
646{ 635{
647 struct hda_codec *codec = private_data; 636 struct hda_codec *codec = private_data;
648 struct conexant_spec *spec = codec->spec; 637 struct conexant_spec *spec = codec->spec;
649 638 /* muted -> LED on */
650 cxt_update_gpio_led(codec, spec->gpio_mute_led_mask, enabled); 639 cxt_update_gpio_led(codec, spec->gpio_mute_led_mask, !enabled);
651} 640}
652 641
653/* turn on/off mic-mute LED via GPIO per capture hook */ 642/* turn on/off mic-mute LED via GPIO per capture hook */
@@ -669,7 +658,6 @@ static void cxt_fixup_mute_led_gpio(struct hda_codec *codec,
669 { 0x01, AC_VERB_SET_GPIO_DIRECTION, 0x03 }, 658 { 0x01, AC_VERB_SET_GPIO_DIRECTION, 0x03 },
670 {} 659 {}
671 }; 660 };
672 codec_info(codec, "action: %d gpio_led: %d\n", action, spec->gpio_led);
673 661
674 if (action == HDA_FIXUP_ACT_PRE_PROBE) { 662 if (action == HDA_FIXUP_ACT_PRE_PROBE) {
675 spec->gen.vmaster_mute.hook = cxt_fixup_gpio_mute_hook; 663 spec->gen.vmaster_mute.hook = cxt_fixup_gpio_mute_hook;
@@ -1083,6 +1071,7 @@ static int patch_conexant_auto(struct hda_codec *codec)
1083 */ 1071 */
1084 1072
1085static const struct hda_device_id snd_hda_id_conexant[] = { 1073static const struct hda_device_id snd_hda_id_conexant[] = {
1074 HDA_CODEC_ENTRY(0x14f11f86, "CX8070", patch_conexant_auto),
1086 HDA_CODEC_ENTRY(0x14f12008, "CX8200", patch_conexant_auto), 1075 HDA_CODEC_ENTRY(0x14f12008, "CX8200", patch_conexant_auto),
1087 HDA_CODEC_ENTRY(0x14f15045, "CX20549 (Venice)", patch_conexant_auto), 1076 HDA_CODEC_ENTRY(0x14f15045, "CX20549 (Venice)", patch_conexant_auto),
1088 HDA_CODEC_ENTRY(0x14f15047, "CX20551 (Waikiki)", patch_conexant_auto), 1077 HDA_CODEC_ENTRY(0x14f15047, "CX20551 (Waikiki)", patch_conexant_auto),
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index de224cbea7a0..e333b3e30e31 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -869,15 +869,6 @@ static void alc_reboot_notify(struct hda_codec *codec)
869 alc_shutup(codec); 869 alc_shutup(codec);
870} 870}
871 871
872/* power down codec to D3 at reboot/shutdown; set as reboot_notify ops */
873static void alc_d3_at_reboot(struct hda_codec *codec)
874{
875 snd_hda_codec_set_power_to_all(codec, codec->core.afg, AC_PWRST_D3);
876 snd_hda_codec_write(codec, codec->core.afg, 0,
877 AC_VERB_SET_POWER_STATE, AC_PWRST_D3);
878 msleep(10);
879}
880
881#define alc_free snd_hda_gen_free 872#define alc_free snd_hda_gen_free
882 873
883#ifdef CONFIG_PM 874#ifdef CONFIG_PM
@@ -5152,7 +5143,7 @@ static void alc_fixup_tpt440_dock(struct hda_codec *codec,
5152 struct alc_spec *spec = codec->spec; 5143 struct alc_spec *spec = codec->spec;
5153 5144
5154 if (action == HDA_FIXUP_ACT_PRE_PROBE) { 5145 if (action == HDA_FIXUP_ACT_PRE_PROBE) {
5155 spec->reboot_notify = alc_d3_at_reboot; /* reduce noise */ 5146 spec->reboot_notify = snd_hda_gen_reboot_notify; /* reduce noise */
5156 spec->parse_flags = HDA_PINCFG_NO_HP_FIXUP; 5147 spec->parse_flags = HDA_PINCFG_NO_HP_FIXUP;
5157 codec->power_save_node = 0; /* avoid click noises */ 5148 codec->power_save_node = 0; /* avoid click noises */
5158 snd_hda_apply_pincfgs(codec, pincfgs); 5149 snd_hda_apply_pincfgs(codec, pincfgs);
@@ -6987,6 +6978,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
6987 SND_PCI_QUIRK(0x103c, 0x82bf, "HP G3 mini", ALC221_FIXUP_HP_MIC_NO_PRESENCE), 6978 SND_PCI_QUIRK(0x103c, 0x82bf, "HP G3 mini", ALC221_FIXUP_HP_MIC_NO_PRESENCE),
6988 SND_PCI_QUIRK(0x103c, 0x82c0, "HP G3 mini premium", ALC221_FIXUP_HP_MIC_NO_PRESENCE), 6979 SND_PCI_QUIRK(0x103c, 0x82c0, "HP G3 mini premium", ALC221_FIXUP_HP_MIC_NO_PRESENCE),
6989 SND_PCI_QUIRK(0x103c, 0x83b9, "HP Spectre x360", ALC269_FIXUP_HP_MUTE_LED_MIC3), 6980 SND_PCI_QUIRK(0x103c, 0x83b9, "HP Spectre x360", ALC269_FIXUP_HP_MUTE_LED_MIC3),
6981 SND_PCI_QUIRK(0x103c, 0x8497, "HP Envy x360", ALC269_FIXUP_HP_MUTE_LED_MIC3),
6990 SND_PCI_QUIRK(0x1043, 0x103e, "ASUS X540SA", ALC256_FIXUP_ASUS_MIC), 6982 SND_PCI_QUIRK(0x1043, 0x103e, "ASUS X540SA", ALC256_FIXUP_ASUS_MIC),
6991 SND_PCI_QUIRK(0x1043, 0x103f, "ASUS TX300", ALC282_FIXUP_ASUS_TX300), 6983 SND_PCI_QUIRK(0x1043, 0x103f, "ASUS TX300", ALC282_FIXUP_ASUS_TX300),
6992 SND_PCI_QUIRK(0x1043, 0x106d, "Asus K53BE", ALC269_FIXUP_LIMIT_INT_MIC_BOOST), 6984 SND_PCI_QUIRK(0x1043, 0x106d, "Asus K53BE", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
diff --git a/sound/soc/amd/raven/acp3x-pcm-dma.c b/sound/soc/amd/raven/acp3x-pcm-dma.c
index a4ade6bb5beb..bc4dfafdfcd1 100644
--- a/sound/soc/amd/raven/acp3x-pcm-dma.c
+++ b/sound/soc/amd/raven/acp3x-pcm-dma.c
@@ -31,8 +31,8 @@ struct i2s_stream_instance {
31 u16 num_pages; 31 u16 num_pages;
32 u16 channels; 32 u16 channels;
33 u32 xfer_resolution; 33 u32 xfer_resolution;
34 struct page *pg;
35 u64 bytescount; 34 u64 bytescount;
35 dma_addr_t dma_addr;
36 void __iomem *acp3x_base; 36 void __iomem *acp3x_base;
37}; 37};
38 38
@@ -211,9 +211,8 @@ static irqreturn_t i2s_irq_handler(int irq, void *dev_id)
211static void config_acp3x_dma(struct i2s_stream_instance *rtd, int direction) 211static void config_acp3x_dma(struct i2s_stream_instance *rtd, int direction)
212{ 212{
213 u16 page_idx; 213 u16 page_idx;
214 u64 addr;
215 u32 low, high, val, acp_fifo_addr; 214 u32 low, high, val, acp_fifo_addr;
216 struct page *pg = rtd->pg; 215 dma_addr_t addr = rtd->dma_addr;
217 216
218 /* 8 scratch registers used to map one 64 bit address */ 217 /* 8 scratch registers used to map one 64 bit address */
219 if (direction == SNDRV_PCM_STREAM_PLAYBACK) 218 if (direction == SNDRV_PCM_STREAM_PLAYBACK)
@@ -229,7 +228,6 @@ static void config_acp3x_dma(struct i2s_stream_instance *rtd, int direction)
229 228
230 for (page_idx = 0; page_idx < rtd->num_pages; page_idx++) { 229 for (page_idx = 0; page_idx < rtd->num_pages; page_idx++) {
231 /* Load the low address of page int ACP SRAM through SRBM */ 230 /* Load the low address of page int ACP SRAM through SRBM */
232 addr = page_to_phys(pg);
233 low = lower_32_bits(addr); 231 low = lower_32_bits(addr);
234 high = upper_32_bits(addr); 232 high = upper_32_bits(addr);
235 233
@@ -239,7 +237,7 @@ static void config_acp3x_dma(struct i2s_stream_instance *rtd, int direction)
239 + 4); 237 + 4);
240 /* Move to next physically contiguos page */ 238 /* Move to next physically contiguos page */
241 val += 8; 239 val += 8;
242 pg++; 240 addr += PAGE_SIZE;
243 } 241 }
244 242
245 if (direction == SNDRV_PCM_STREAM_PLAYBACK) { 243 if (direction == SNDRV_PCM_STREAM_PLAYBACK) {
@@ -341,7 +339,6 @@ static int acp3x_dma_hw_params(struct snd_pcm_substream *substream,
341{ 339{
342 int status; 340 int status;
343 u64 size; 341 u64 size;
344 struct page *pg;
345 struct snd_pcm_runtime *runtime = substream->runtime; 342 struct snd_pcm_runtime *runtime = substream->runtime;
346 struct i2s_stream_instance *rtd = runtime->private_data; 343 struct i2s_stream_instance *rtd = runtime->private_data;
347 344
@@ -354,9 +351,8 @@ static int acp3x_dma_hw_params(struct snd_pcm_substream *substream,
354 return status; 351 return status;
355 352
356 memset(substream->runtime->dma_area, 0, params_buffer_bytes(params)); 353 memset(substream->runtime->dma_area, 0, params_buffer_bytes(params));
357 pg = virt_to_page(substream->dma_buffer.area); 354 if (substream->dma_buffer.area) {
358 if (pg) { 355 rtd->dma_addr = substream->dma_buffer.addr;
359 rtd->pg = pg;
360 rtd->num_pages = (PAGE_ALIGN(size) >> PAGE_SHIFT); 356 rtd->num_pages = (PAGE_ALIGN(size) >> PAGE_SHIFT);
361 config_acp3x_dma(rtd, substream->stream); 357 config_acp3x_dma(rtd, substream->stream);
362 status = 0; 358 status = 0;
@@ -385,9 +381,11 @@ static snd_pcm_uframes_t acp3x_dma_pointer(struct snd_pcm_substream *substream)
385 381
386static int acp3x_dma_new(struct snd_soc_pcm_runtime *rtd) 382static int acp3x_dma_new(struct snd_soc_pcm_runtime *rtd)
387{ 383{
384 struct snd_soc_component *component = snd_soc_rtdcom_lookup(rtd,
385 DRV_NAME);
386 struct device *parent = component->dev->parent;
388 snd_pcm_lib_preallocate_pages_for_all(rtd->pcm, SNDRV_DMA_TYPE_DEV, 387 snd_pcm_lib_preallocate_pages_for_all(rtd->pcm, SNDRV_DMA_TYPE_DEV,
389 rtd->pcm->card->dev, 388 parent, MIN_BUFFER, MAX_BUFFER);
390 MIN_BUFFER, MAX_BUFFER);
391 return 0; 389 return 0;
392} 390}
393 391
diff --git a/sound/soc/codecs/cs42xx8.c b/sound/soc/codecs/cs42xx8.c
index 6203f54d9f25..5b049fcdba20 100644
--- a/sound/soc/codecs/cs42xx8.c
+++ b/sound/soc/codecs/cs42xx8.c
@@ -47,6 +47,7 @@ struct cs42xx8_priv {
47 unsigned long sysclk; 47 unsigned long sysclk;
48 u32 tx_channels; 48 u32 tx_channels;
49 struct gpio_desc *gpiod_reset; 49 struct gpio_desc *gpiod_reset;
50 u32 rate[2];
50}; 51};
51 52
52/* -127.5dB to 0dB with step of 0.5dB */ 53/* -127.5dB to 0dB with step of 0.5dB */
@@ -176,21 +177,27 @@ static const struct snd_soc_dapm_route cs42xx8_adc3_dapm_routes[] = {
176}; 177};
177 178
178struct cs42xx8_ratios { 179struct cs42xx8_ratios {
179 unsigned int ratio; 180 unsigned int mfreq;
180 unsigned char speed; 181 unsigned int min_mclk;
181 unsigned char mclk; 182 unsigned int max_mclk;
183 unsigned int ratio[3];
182}; 184};
183 185
186/*
187 * According to reference mannual, define the cs42xx8_ratio struct
188 * MFreq2 | MFreq1 | MFreq0 | Description | SSM | DSM | QSM |
189 * 0 | 0 | 0 |1.029MHz to 12.8MHz | 256 | 128 | 64 |
190 * 0 | 0 | 1 |1.536MHz to 19.2MHz | 384 | 192 | 96 |
191 * 0 | 1 | 0 |2.048MHz to 25.6MHz | 512 | 256 | 128 |
192 * 0 | 1 | 1 |3.072MHz to 38.4MHz | 768 | 384 | 192 |
193 * 1 | x | x |4.096MHz to 51.2MHz |1024 | 512 | 256 |
194 */
184static const struct cs42xx8_ratios cs42xx8_ratios[] = { 195static const struct cs42xx8_ratios cs42xx8_ratios[] = {
185 { 64, CS42XX8_FM_QUAD, CS42XX8_FUNCMOD_MFREQ_256(4) }, 196 { 0, 1029000, 12800000, {256, 128, 64} },
186 { 96, CS42XX8_FM_QUAD, CS42XX8_FUNCMOD_MFREQ_384(4) }, 197 { 2, 1536000, 19200000, {384, 192, 96} },
187 { 128, CS42XX8_FM_QUAD, CS42XX8_FUNCMOD_MFREQ_512(4) }, 198 { 4, 2048000, 25600000, {512, 256, 128} },
188 { 192, CS42XX8_FM_QUAD, CS42XX8_FUNCMOD_MFREQ_768(4) }, 199 { 6, 3072000, 38400000, {768, 384, 192} },
189 { 256, CS42XX8_FM_SINGLE, CS42XX8_FUNCMOD_MFREQ_256(1) }, 200 { 8, 4096000, 51200000, {1024, 512, 256} },
190 { 384, CS42XX8_FM_SINGLE, CS42XX8_FUNCMOD_MFREQ_384(1) },
191 { 512, CS42XX8_FM_SINGLE, CS42XX8_FUNCMOD_MFREQ_512(1) },
192 { 768, CS42XX8_FM_SINGLE, CS42XX8_FUNCMOD_MFREQ_768(1) },
193 { 1024, CS42XX8_FM_SINGLE, CS42XX8_FUNCMOD_MFREQ_1024(1) }
194}; 201};
195 202
196static int cs42xx8_set_dai_sysclk(struct snd_soc_dai *codec_dai, 203static int cs42xx8_set_dai_sysclk(struct snd_soc_dai *codec_dai,
@@ -257,14 +264,68 @@ static int cs42xx8_hw_params(struct snd_pcm_substream *substream,
257 struct snd_soc_component *component = dai->component; 264 struct snd_soc_component *component = dai->component;
258 struct cs42xx8_priv *cs42xx8 = snd_soc_component_get_drvdata(component); 265 struct cs42xx8_priv *cs42xx8 = snd_soc_component_get_drvdata(component);
259 bool tx = substream->stream == SNDRV_PCM_STREAM_PLAYBACK; 266 bool tx = substream->stream == SNDRV_PCM_STREAM_PLAYBACK;
260 u32 ratio = cs42xx8->sysclk / params_rate(params); 267 u32 ratio[2];
261 u32 i, fm, val, mask; 268 u32 rate[2];
269 u32 fm[2];
270 u32 i, val, mask;
271 bool condition1, condition2;
262 272
263 if (tx) 273 if (tx)
264 cs42xx8->tx_channels = params_channels(params); 274 cs42xx8->tx_channels = params_channels(params);
265 275
276 rate[tx] = params_rate(params);
277 rate[!tx] = cs42xx8->rate[!tx];
278
279 ratio[tx] = rate[tx] > 0 ? cs42xx8->sysclk / rate[tx] : 0;
280 ratio[!tx] = rate[!tx] > 0 ? cs42xx8->sysclk / rate[!tx] : 0;
281
282 /* Get functional mode for tx and rx according to rate */
283 for (i = 0; i < 2; i++) {
284 if (cs42xx8->slave_mode) {
285 fm[i] = CS42XX8_FM_AUTO;
286 } else {
287 if (rate[i] < 50000) {
288 fm[i] = CS42XX8_FM_SINGLE;
289 } else if (rate[i] > 50000 && rate[i] < 100000) {
290 fm[i] = CS42XX8_FM_DOUBLE;
291 } else if (rate[i] > 100000 && rate[i] < 200000) {
292 fm[i] = CS42XX8_FM_QUAD;
293 } else {
294 dev_err(component->dev,
295 "unsupported sample rate\n");
296 return -EINVAL;
297 }
298 }
299 }
300
266 for (i = 0; i < ARRAY_SIZE(cs42xx8_ratios); i++) { 301 for (i = 0; i < ARRAY_SIZE(cs42xx8_ratios); i++) {
267 if (cs42xx8_ratios[i].ratio == ratio) 302 /* Is the ratio[tx] valid ? */
303 condition1 = ((fm[tx] == CS42XX8_FM_AUTO) ?
304 (cs42xx8_ratios[i].ratio[0] == ratio[tx] ||
305 cs42xx8_ratios[i].ratio[1] == ratio[tx] ||
306 cs42xx8_ratios[i].ratio[2] == ratio[tx]) :
307 (cs42xx8_ratios[i].ratio[fm[tx]] == ratio[tx])) &&
308 cs42xx8->sysclk >= cs42xx8_ratios[i].min_mclk &&
309 cs42xx8->sysclk <= cs42xx8_ratios[i].max_mclk;
310
311 if (!ratio[tx])
312 condition1 = true;
313
314 /* Is the ratio[!tx] valid ? */
315 condition2 = ((fm[!tx] == CS42XX8_FM_AUTO) ?
316 (cs42xx8_ratios[i].ratio[0] == ratio[!tx] ||
317 cs42xx8_ratios[i].ratio[1] == ratio[!tx] ||
318 cs42xx8_ratios[i].ratio[2] == ratio[!tx]) :
319 (cs42xx8_ratios[i].ratio[fm[!tx]] == ratio[!tx]));
320
321 if (!ratio[!tx])
322 condition2 = true;
323
324 /*
325 * Both ratio[tx] and ratio[!tx] is valid, then we get
326 * a proper MFreq.
327 */
328 if (condition1 && condition2)
268 break; 329 break;
269 } 330 }
270 331
@@ -273,15 +334,31 @@ static int cs42xx8_hw_params(struct snd_pcm_substream *substream,
273 return -EINVAL; 334 return -EINVAL;
274 } 335 }
275 336
276 mask = CS42XX8_FUNCMOD_MFREQ_MASK; 337 cs42xx8->rate[tx] = params_rate(params);
277 val = cs42xx8_ratios[i].mclk;
278 338
279 fm = cs42xx8->slave_mode ? CS42XX8_FM_AUTO : cs42xx8_ratios[i].speed; 339 mask = CS42XX8_FUNCMOD_MFREQ_MASK;
340 val = cs42xx8_ratios[i].mfreq;
280 341
281 regmap_update_bits(cs42xx8->regmap, CS42XX8_FUNCMOD, 342 regmap_update_bits(cs42xx8->regmap, CS42XX8_FUNCMOD,
282 CS42XX8_FUNCMOD_xC_FM_MASK(tx) | mask, 343 CS42XX8_FUNCMOD_xC_FM_MASK(tx) | mask,
283 CS42XX8_FUNCMOD_xC_FM(tx, fm) | val); 344 CS42XX8_FUNCMOD_xC_FM(tx, fm[tx]) | val);
345
346 return 0;
347}
348
349static int cs42xx8_hw_free(struct snd_pcm_substream *substream,
350 struct snd_soc_dai *dai)
351{
352 struct snd_soc_component *component = dai->component;
353 struct cs42xx8_priv *cs42xx8 = snd_soc_component_get_drvdata(component);
354 bool tx = substream->stream == SNDRV_PCM_STREAM_PLAYBACK;
284 355
356 /* Clear stored rate */
357 cs42xx8->rate[tx] = 0;
358
359 regmap_update_bits(cs42xx8->regmap, CS42XX8_FUNCMOD,
360 CS42XX8_FUNCMOD_xC_FM_MASK(tx),
361 CS42XX8_FUNCMOD_xC_FM(tx, CS42XX8_FM_AUTO));
285 return 0; 362 return 0;
286} 363}
287 364
@@ -302,6 +379,7 @@ static const struct snd_soc_dai_ops cs42xx8_dai_ops = {
302 .set_fmt = cs42xx8_set_dai_fmt, 379 .set_fmt = cs42xx8_set_dai_fmt,
303 .set_sysclk = cs42xx8_set_dai_sysclk, 380 .set_sysclk = cs42xx8_set_dai_sysclk,
304 .hw_params = cs42xx8_hw_params, 381 .hw_params = cs42xx8_hw_params,
382 .hw_free = cs42xx8_hw_free,
305 .digital_mute = cs42xx8_digital_mute, 383 .digital_mute = cs42xx8_digital_mute,
306}; 384};
307 385
diff --git a/sound/soc/codecs/max98357a.c b/sound/soc/codecs/max98357a.c
index 6f0e28f903bf..16313b973eaa 100644
--- a/sound/soc/codecs/max98357a.c
+++ b/sound/soc/codecs/max98357a.c
@@ -20,20 +20,10 @@
20#include <sound/soc-dapm.h> 20#include <sound/soc-dapm.h>
21 21
22struct max98357a_priv { 22struct max98357a_priv {
23 struct delayed_work enable_sdmode_work;
24 struct gpio_desc *sdmode; 23 struct gpio_desc *sdmode;
25 unsigned int sdmode_delay; 24 unsigned int sdmode_delay;
26}; 25};
27 26
28static void max98357a_enable_sdmode_work(struct work_struct *work)
29{
30 struct max98357a_priv *max98357a =
31 container_of(work, struct max98357a_priv,
32 enable_sdmode_work.work);
33
34 gpiod_set_value(max98357a->sdmode, 1);
35}
36
37static int max98357a_daiops_trigger(struct snd_pcm_substream *substream, 27static int max98357a_daiops_trigger(struct snd_pcm_substream *substream,
38 int cmd, struct snd_soc_dai *dai) 28 int cmd, struct snd_soc_dai *dai)
39{ 29{
@@ -46,14 +36,12 @@ static int max98357a_daiops_trigger(struct snd_pcm_substream *substream,
46 case SNDRV_PCM_TRIGGER_START: 36 case SNDRV_PCM_TRIGGER_START:
47 case SNDRV_PCM_TRIGGER_RESUME: 37 case SNDRV_PCM_TRIGGER_RESUME:
48 case SNDRV_PCM_TRIGGER_PAUSE_RELEASE: 38 case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
49 queue_delayed_work(system_power_efficient_wq, 39 mdelay(max98357a->sdmode_delay);
50 &max98357a->enable_sdmode_work, 40 gpiod_set_value(max98357a->sdmode, 1);
51 msecs_to_jiffies(max98357a->sdmode_delay));
52 break; 41 break;
53 case SNDRV_PCM_TRIGGER_STOP: 42 case SNDRV_PCM_TRIGGER_STOP:
54 case SNDRV_PCM_TRIGGER_SUSPEND: 43 case SNDRV_PCM_TRIGGER_SUSPEND:
55 case SNDRV_PCM_TRIGGER_PAUSE_PUSH: 44 case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
56 cancel_delayed_work_sync(&max98357a->enable_sdmode_work);
57 gpiod_set_value(max98357a->sdmode, 0); 45 gpiod_set_value(max98357a->sdmode, 0);
58 break; 46 break;
59 } 47 }
@@ -112,30 +100,25 @@ static int max98357a_platform_probe(struct platform_device *pdev)
112 int ret; 100 int ret;
113 101
114 max98357a = devm_kzalloc(&pdev->dev, sizeof(*max98357a), GFP_KERNEL); 102 max98357a = devm_kzalloc(&pdev->dev, sizeof(*max98357a), GFP_KERNEL);
115
116 if (!max98357a) 103 if (!max98357a)
117 return -ENOMEM; 104 return -ENOMEM;
118 105
119 max98357a->sdmode = devm_gpiod_get_optional(&pdev->dev, 106 max98357a->sdmode = devm_gpiod_get_optional(&pdev->dev,
120 "sdmode", GPIOD_OUT_LOW); 107 "sdmode", GPIOD_OUT_LOW);
121
122 if (IS_ERR(max98357a->sdmode)) 108 if (IS_ERR(max98357a->sdmode))
123 return PTR_ERR(max98357a->sdmode); 109 return PTR_ERR(max98357a->sdmode);
124 110
125 ret = device_property_read_u32(&pdev->dev, "sdmode-delay", 111 ret = device_property_read_u32(&pdev->dev, "sdmode-delay",
126 &max98357a->sdmode_delay); 112 &max98357a->sdmode_delay);
127
128 if (ret) { 113 if (ret) {
129 max98357a->sdmode_delay = 0; 114 max98357a->sdmode_delay = 0;
130 dev_dbg(&pdev->dev, 115 dev_dbg(&pdev->dev,
131 "no optional property 'sdmode-delay' found, default: no delay\n"); 116 "no optional property 'sdmode-delay' found, "
117 "default: no delay\n");
132 } 118 }
133 119
134 dev_set_drvdata(&pdev->dev, max98357a); 120 dev_set_drvdata(&pdev->dev, max98357a);
135 121
136 INIT_DELAYED_WORK(&max98357a->enable_sdmode_work,
137 max98357a_enable_sdmode_work);
138
139 return devm_snd_soc_register_component(&pdev->dev, 122 return devm_snd_soc_register_component(&pdev->dev,
140 &max98357a_component_driver, 123 &max98357a_component_driver,
141 &max98357a_dai_driver, 1); 124 &max98357a_dai_driver, 1);
diff --git a/sound/soc/codecs/max98373.c b/sound/soc/codecs/max98373.c
index 528695cd6a1c..8c601a3ebc27 100644
--- a/sound/soc/codecs/max98373.c
+++ b/sound/soc/codecs/max98373.c
@@ -267,6 +267,12 @@ static int max98373_dai_hw_params(struct snd_pcm_substream *substream,
267 case 48000: 267 case 48000:
268 sampling_rate = MAX98373_PCM_SR_SET1_SR_48000; 268 sampling_rate = MAX98373_PCM_SR_SET1_SR_48000;
269 break; 269 break;
270 case 88200:
271 sampling_rate = MAX98373_PCM_SR_SET1_SR_88200;
272 break;
273 case 96000:
274 sampling_rate = MAX98373_PCM_SR_SET1_SR_96000;
275 break;
270 default: 276 default:
271 dev_err(component->dev, "rate %d not supported\n", 277 dev_err(component->dev, "rate %d not supported\n",
272 params_rate(params)); 278 params_rate(params));
diff --git a/sound/soc/codecs/max98373.h b/sound/soc/codecs/max98373.h
index f6a37aa02f26..a59e51355a84 100644
--- a/sound/soc/codecs/max98373.h
+++ b/sound/soc/codecs/max98373.h
@@ -130,6 +130,8 @@
130#define MAX98373_PCM_SR_SET1_SR_32000 (0x6 << 0) 130#define MAX98373_PCM_SR_SET1_SR_32000 (0x6 << 0)
131#define MAX98373_PCM_SR_SET1_SR_44100 (0x7 << 0) 131#define MAX98373_PCM_SR_SET1_SR_44100 (0x7 << 0)
132#define MAX98373_PCM_SR_SET1_SR_48000 (0x8 << 0) 132#define MAX98373_PCM_SR_SET1_SR_48000 (0x8 << 0)
133#define MAX98373_PCM_SR_SET1_SR_88200 (0x9 << 0)
134#define MAX98373_PCM_SR_SET1_SR_96000 (0xA << 0)
133 135
134/* MAX98373_R2028_PCM_SR_SETUP_2 */ 136/* MAX98373_R2028_PCM_SR_SETUP_2 */
135#define MAX98373_PCM_SR_SET2_SR_MASK (0xF << 4) 137#define MAX98373_PCM_SR_SET2_SR_MASK (0xF << 4)
diff --git a/sound/soc/codecs/pcm3060-i2c.c b/sound/soc/codecs/pcm3060-i2c.c
index cdc8314882bc..abcdeb922201 100644
--- a/sound/soc/codecs/pcm3060-i2c.c
+++ b/sound/soc/codecs/pcm3060-i2c.c
@@ -2,7 +2,7 @@
2// 2//
3// PCM3060 I2C driver 3// PCM3060 I2C driver
4// 4//
5// Copyright (C) 2018 Kirill Marinushkin <kmarinushkin@birdec.tech> 5// Copyright (C) 2018 Kirill Marinushkin <kmarinushkin@birdec.com>
6 6
7#include <linux/i2c.h> 7#include <linux/i2c.h>
8#include <linux/module.h> 8#include <linux/module.h>
@@ -56,5 +56,5 @@ static struct i2c_driver pcm3060_i2c_driver = {
56module_i2c_driver(pcm3060_i2c_driver); 56module_i2c_driver(pcm3060_i2c_driver);
57 57
58MODULE_DESCRIPTION("PCM3060 I2C driver"); 58MODULE_DESCRIPTION("PCM3060 I2C driver");
59MODULE_AUTHOR("Kirill Marinushkin <kmarinushkin@birdec.tech>"); 59MODULE_AUTHOR("Kirill Marinushkin <kmarinushkin@birdec.com>");
60MODULE_LICENSE("GPL v2"); 60MODULE_LICENSE("GPL v2");
diff --git a/sound/soc/codecs/pcm3060-spi.c b/sound/soc/codecs/pcm3060-spi.c
index f6f19fa80932..3b79734b832b 100644
--- a/sound/soc/codecs/pcm3060-spi.c
+++ b/sound/soc/codecs/pcm3060-spi.c
@@ -2,7 +2,7 @@
2// 2//
3// PCM3060 SPI driver 3// PCM3060 SPI driver
4// 4//
5// Copyright (C) 2018 Kirill Marinushkin <kmarinushkin@birdec.tech> 5// Copyright (C) 2018 Kirill Marinushkin <kmarinushkin@birdec.com>
6 6
7#include <linux/module.h> 7#include <linux/module.h>
8#include <linux/spi/spi.h> 8#include <linux/spi/spi.h>
@@ -55,5 +55,5 @@ static struct spi_driver pcm3060_spi_driver = {
55module_spi_driver(pcm3060_spi_driver); 55module_spi_driver(pcm3060_spi_driver);
56 56
57MODULE_DESCRIPTION("PCM3060 SPI driver"); 57MODULE_DESCRIPTION("PCM3060 SPI driver");
58MODULE_AUTHOR("Kirill Marinushkin <kmarinushkin@birdec.tech>"); 58MODULE_AUTHOR("Kirill Marinushkin <kmarinushkin@birdec.com>");
59MODULE_LICENSE("GPL v2"); 59MODULE_LICENSE("GPL v2");
diff --git a/sound/soc/codecs/pcm3060.c b/sound/soc/codecs/pcm3060.c
index 32b26f1c2282..b2358069cf9b 100644
--- a/sound/soc/codecs/pcm3060.c
+++ b/sound/soc/codecs/pcm3060.c
@@ -2,7 +2,7 @@
2// 2//
3// PCM3060 codec driver 3// PCM3060 codec driver
4// 4//
5// Copyright (C) 2018 Kirill Marinushkin <kmarinushkin@birdec.tech> 5// Copyright (C) 2018 Kirill Marinushkin <kmarinushkin@birdec.com>
6 6
7#include <linux/module.h> 7#include <linux/module.h>
8#include <sound/pcm_params.h> 8#include <sound/pcm_params.h>
@@ -342,5 +342,5 @@ int pcm3060_probe(struct device *dev)
342EXPORT_SYMBOL(pcm3060_probe); 342EXPORT_SYMBOL(pcm3060_probe);
343 343
344MODULE_DESCRIPTION("PCM3060 codec driver"); 344MODULE_DESCRIPTION("PCM3060 codec driver");
345MODULE_AUTHOR("Kirill Marinushkin <kmarinushkin@birdec.tech>"); 345MODULE_AUTHOR("Kirill Marinushkin <kmarinushkin@birdec.com>");
346MODULE_LICENSE("GPL v2"); 346MODULE_LICENSE("GPL v2");
diff --git a/sound/soc/codecs/pcm3060.h b/sound/soc/codecs/pcm3060.h
index 75931c9a9d85..18d51e5dac2c 100644
--- a/sound/soc/codecs/pcm3060.h
+++ b/sound/soc/codecs/pcm3060.h
@@ -2,7 +2,7 @@
2/* 2/*
3 * PCM3060 codec driver 3 * PCM3060 codec driver
4 * 4 *
5 * Copyright (C) 2018 Kirill Marinushkin <kmarinushkin@birdec.tech> 5 * Copyright (C) 2018 Kirill Marinushkin <kmarinushkin@birdec.com>
6 */ 6 */
7 7
8#ifndef _SND_SOC_PCM3060_H 8#ifndef _SND_SOC_PCM3060_H
diff --git a/sound/soc/codecs/rt1011.c b/sound/soc/codecs/rt1011.c
index 5605b660f4bf..0a6ff13d76e1 100644
--- a/sound/soc/codecs/rt1011.c
+++ b/sound/soc/codecs/rt1011.c
@@ -39,7 +39,7 @@ static const struct reg_sequence init_list[] = {
39 { RT1011_POWER_9, 0xa840 }, 39 { RT1011_POWER_9, 0xa840 },
40 40
41 { RT1011_ADC_SET_5, 0x0a20 }, 41 { RT1011_ADC_SET_5, 0x0a20 },
42 { RT1011_DAC_SET_2, 0xa232 }, 42 { RT1011_DAC_SET_2, 0xa032 },
43 { RT1011_ADC_SET_1, 0x2925 }, 43 { RT1011_ADC_SET_1, 0x2925 },
44 44
45 { RT1011_SPK_PRO_DC_DET_1, 0xb00c }, 45 { RT1011_SPK_PRO_DC_DET_1, 0xb00c },
@@ -1917,7 +1917,7 @@ static int rt1011_set_bias_level(struct snd_soc_component *component,
1917 snd_soc_component_write(component, 1917 snd_soc_component_write(component,
1918 RT1011_SYSTEM_RESET_2, 0x0000); 1918 RT1011_SYSTEM_RESET_2, 0x0000);
1919 snd_soc_component_write(component, 1919 snd_soc_component_write(component,
1920 RT1011_SYSTEM_RESET_3, 0x0000); 1920 RT1011_SYSTEM_RESET_3, 0x0001);
1921 snd_soc_component_write(component, 1921 snd_soc_component_write(component,
1922 RT1011_SYSTEM_RESET_1, 0x003f); 1922 RT1011_SYSTEM_RESET_1, 0x003f);
1923 snd_soc_component_write(component, 1923 snd_soc_component_write(component,
diff --git a/sound/soc/codecs/rt1308.c b/sound/soc/codecs/rt1308.c
index d673506c7c39..d673506c7c39 100755..100644
--- a/sound/soc/codecs/rt1308.c
+++ b/sound/soc/codecs/rt1308.c
diff --git a/sound/soc/codecs/rt1308.h b/sound/soc/codecs/rt1308.h
index c330aae1d527..c330aae1d527 100755..100644
--- a/sound/soc/codecs/rt1308.h
+++ b/sound/soc/codecs/rt1308.h
diff --git a/sound/soc/generic/audio-graph-card.c b/sound/soc/generic/audio-graph-card.c
index 30a4e8399ec3..288df245b2f0 100644
--- a/sound/soc/generic/audio-graph-card.c
+++ b/sound/soc/generic/audio-graph-card.c
@@ -63,6 +63,7 @@ static int graph_get_dai_id(struct device_node *ep)
63 struct device_node *endpoint; 63 struct device_node *endpoint;
64 struct of_endpoint info; 64 struct of_endpoint info;
65 int i, id; 65 int i, id;
66 const u32 *reg;
66 int ret; 67 int ret;
67 68
68 /* use driver specified DAI ID if exist */ 69 /* use driver specified DAI ID if exist */
@@ -83,8 +84,9 @@ static int graph_get_dai_id(struct device_node *ep)
83 return info.id; 84 return info.id;
84 85
85 node = of_get_parent(ep); 86 node = of_get_parent(ep);
87 reg = of_get_property(node, "reg", NULL);
86 of_node_put(node); 88 of_node_put(node);
87 if (of_get_property(node, "reg", NULL)) 89 if (reg)
88 return info.port; 90 return info.port;
89 } 91 }
90 node = of_graph_get_port_parent(ep); 92 node = of_graph_get_port_parent(ep);
@@ -208,10 +210,6 @@ static int graph_dai_link_of_dpcm(struct asoc_simple_priv *priv,
208 210
209 dev_dbg(dev, "link_of DPCM (%pOF)\n", ep); 211 dev_dbg(dev, "link_of DPCM (%pOF)\n", ep);
210 212
211 of_node_put(ports);
212 of_node_put(port);
213 of_node_put(node);
214
215 if (li->cpu) { 213 if (li->cpu) {
216 int is_single_links = 0; 214 int is_single_links = 0;
217 215
@@ -229,17 +227,17 @@ static int graph_dai_link_of_dpcm(struct asoc_simple_priv *priv,
229 227
230 ret = asoc_simple_parse_cpu(ep, dai_link, &is_single_links); 228 ret = asoc_simple_parse_cpu(ep, dai_link, &is_single_links);
231 if (ret) 229 if (ret)
232 return ret; 230 goto out_put_node;
233 231
234 ret = asoc_simple_parse_clk_cpu(dev, ep, dai_link, dai); 232 ret = asoc_simple_parse_clk_cpu(dev, ep, dai_link, dai);
235 if (ret < 0) 233 if (ret < 0)
236 return ret; 234 goto out_put_node;
237 235
238 ret = asoc_simple_set_dailink_name(dev, dai_link, 236 ret = asoc_simple_set_dailink_name(dev, dai_link,
239 "fe.%s", 237 "fe.%s",
240 cpus->dai_name); 238 cpus->dai_name);
241 if (ret < 0) 239 if (ret < 0)
242 return ret; 240 goto out_put_node;
243 241
244 /* card->num_links includes Codec */ 242 /* card->num_links includes Codec */
245 asoc_simple_canonicalize_cpu(dai_link, is_single_links); 243 asoc_simple_canonicalize_cpu(dai_link, is_single_links);
@@ -263,17 +261,17 @@ static int graph_dai_link_of_dpcm(struct asoc_simple_priv *priv,
263 261
264 ret = asoc_simple_parse_codec(ep, dai_link); 262 ret = asoc_simple_parse_codec(ep, dai_link);
265 if (ret < 0) 263 if (ret < 0)
266 return ret; 264 goto out_put_node;
267 265
268 ret = asoc_simple_parse_clk_codec(dev, ep, dai_link, dai); 266 ret = asoc_simple_parse_clk_codec(dev, ep, dai_link, dai);
269 if (ret < 0) 267 if (ret < 0)
270 return ret; 268 goto out_put_node;
271 269
272 ret = asoc_simple_set_dailink_name(dev, dai_link, 270 ret = asoc_simple_set_dailink_name(dev, dai_link,
273 "be.%s", 271 "be.%s",
274 codecs->dai_name); 272 codecs->dai_name);
275 if (ret < 0) 273 if (ret < 0)
276 return ret; 274 goto out_put_node;
277 275
278 /* check "prefix" from top node */ 276 /* check "prefix" from top node */
279 snd_soc_of_parse_node_prefix(top, cconf, codecs->of_node, 277 snd_soc_of_parse_node_prefix(top, cconf, codecs->of_node,
@@ -293,19 +291,23 @@ static int graph_dai_link_of_dpcm(struct asoc_simple_priv *priv,
293 291
294 ret = asoc_simple_parse_tdm(ep, dai); 292 ret = asoc_simple_parse_tdm(ep, dai);
295 if (ret) 293 if (ret)
296 return ret; 294 goto out_put_node;
297 295
298 ret = asoc_simple_parse_daifmt(dev, cpu_ep, codec_ep, 296 ret = asoc_simple_parse_daifmt(dev, cpu_ep, codec_ep,
299 NULL, &dai_link->dai_fmt); 297 NULL, &dai_link->dai_fmt);
300 if (ret < 0) 298 if (ret < 0)
301 return ret; 299 goto out_put_node;
302 300
303 dai_link->dpcm_playback = 1; 301 dai_link->dpcm_playback = 1;
304 dai_link->dpcm_capture = 1; 302 dai_link->dpcm_capture = 1;
305 dai_link->ops = &graph_ops; 303 dai_link->ops = &graph_ops;
306 dai_link->init = asoc_simple_dai_init; 304 dai_link->init = asoc_simple_dai_init;
307 305
308 return 0; 306out_put_node:
307 of_node_put(ports);
308 of_node_put(port);
309 of_node_put(node);
310 return ret;
309} 311}
310 312
311static int graph_dai_link_of(struct asoc_simple_priv *priv, 313static int graph_dai_link_of(struct asoc_simple_priv *priv,
diff --git a/sound/soc/generic/simple-card-utils.c b/sound/soc/generic/simple-card-utils.c
index ac8678fe55ff..556b1a789629 100644
--- a/sound/soc/generic/simple-card-utils.c
+++ b/sound/soc/generic/simple-card-utils.c
@@ -349,6 +349,13 @@ void asoc_simple_canonicalize_platform(struct snd_soc_dai_link *dai_link)
349 /* Assumes platform == cpu */ 349 /* Assumes platform == cpu */
350 if (!dai_link->platforms->of_node) 350 if (!dai_link->platforms->of_node)
351 dai_link->platforms->of_node = dai_link->cpus->of_node; 351 dai_link->platforms->of_node = dai_link->cpus->of_node;
352
353 /*
354 * DPCM BE can be no platform.
355 * Alloced memory will be waste, but not leak.
356 */
357 if (!dai_link->platforms->of_node)
358 dai_link->num_platforms = 0;
352} 359}
353EXPORT_SYMBOL_GPL(asoc_simple_canonicalize_platform); 360EXPORT_SYMBOL_GPL(asoc_simple_canonicalize_platform);
354 361
diff --git a/sound/soc/generic/simple-card.c b/sound/soc/generic/simple-card.c
index e5cde0d5e63c..ef849151ba56 100644
--- a/sound/soc/generic/simple-card.c
+++ b/sound/soc/generic/simple-card.c
@@ -124,8 +124,6 @@ static int simple_dai_link_of_dpcm(struct asoc_simple_priv *priv,
124 124
125 li->link++; 125 li->link++;
126 126
127 of_node_put(node);
128
129 /* For single DAI link & old style of DT node */ 127 /* For single DAI link & old style of DT node */
130 if (is_top) 128 if (is_top)
131 prefix = PREFIX; 129 prefix = PREFIX;
@@ -147,17 +145,17 @@ static int simple_dai_link_of_dpcm(struct asoc_simple_priv *priv,
147 145
148 ret = asoc_simple_parse_cpu(np, dai_link, &is_single_links); 146 ret = asoc_simple_parse_cpu(np, dai_link, &is_single_links);
149 if (ret) 147 if (ret)
150 return ret; 148 goto out_put_node;
151 149
152 ret = asoc_simple_parse_clk_cpu(dev, np, dai_link, dai); 150 ret = asoc_simple_parse_clk_cpu(dev, np, dai_link, dai);
153 if (ret < 0) 151 if (ret < 0)
154 return ret; 152 goto out_put_node;
155 153
156 ret = asoc_simple_set_dailink_name(dev, dai_link, 154 ret = asoc_simple_set_dailink_name(dev, dai_link,
157 "fe.%s", 155 "fe.%s",
158 cpus->dai_name); 156 cpus->dai_name);
159 if (ret < 0) 157 if (ret < 0)
160 return ret; 158 goto out_put_node;
161 159
162 asoc_simple_canonicalize_cpu(dai_link, is_single_links); 160 asoc_simple_canonicalize_cpu(dai_link, is_single_links);
163 } else { 161 } else {
@@ -180,17 +178,17 @@ static int simple_dai_link_of_dpcm(struct asoc_simple_priv *priv,
180 178
181 ret = asoc_simple_parse_codec(np, dai_link); 179 ret = asoc_simple_parse_codec(np, dai_link);
182 if (ret < 0) 180 if (ret < 0)
183 return ret; 181 goto out_put_node;
184 182
185 ret = asoc_simple_parse_clk_codec(dev, np, dai_link, dai); 183 ret = asoc_simple_parse_clk_codec(dev, np, dai_link, dai);
186 if (ret < 0) 184 if (ret < 0)
187 return ret; 185 goto out_put_node;
188 186
189 ret = asoc_simple_set_dailink_name(dev, dai_link, 187 ret = asoc_simple_set_dailink_name(dev, dai_link,
190 "be.%s", 188 "be.%s",
191 codecs->dai_name); 189 codecs->dai_name);
192 if (ret < 0) 190 if (ret < 0)
193 return ret; 191 goto out_put_node;
194 192
195 /* check "prefix" from top node */ 193 /* check "prefix" from top node */
196 snd_soc_of_parse_node_prefix(top, cconf, codecs->of_node, 194 snd_soc_of_parse_node_prefix(top, cconf, codecs->of_node,
@@ -208,19 +206,21 @@ static int simple_dai_link_of_dpcm(struct asoc_simple_priv *priv,
208 206
209 ret = asoc_simple_parse_tdm(np, dai); 207 ret = asoc_simple_parse_tdm(np, dai);
210 if (ret) 208 if (ret)
211 return ret; 209 goto out_put_node;
212 210
213 ret = asoc_simple_parse_daifmt(dev, node, codec, 211 ret = asoc_simple_parse_daifmt(dev, node, codec,
214 prefix, &dai_link->dai_fmt); 212 prefix, &dai_link->dai_fmt);
215 if (ret < 0) 213 if (ret < 0)
216 return ret; 214 goto out_put_node;
217 215
218 dai_link->dpcm_playback = 1; 216 dai_link->dpcm_playback = 1;
219 dai_link->dpcm_capture = 1; 217 dai_link->dpcm_capture = 1;
220 dai_link->ops = &simple_ops; 218 dai_link->ops = &simple_ops;
221 dai_link->init = asoc_simple_dai_init; 219 dai_link->init = asoc_simple_dai_init;
222 220
223 return 0; 221out_put_node:
222 of_node_put(node);
223 return ret;
224} 224}
225 225
226static int simple_dai_link_of(struct asoc_simple_priv *priv, 226static int simple_dai_link_of(struct asoc_simple_priv *priv,
@@ -364,8 +364,6 @@ static int simple_for_each_link(struct asoc_simple_priv *priv,
364 goto error; 364 goto error;
365 } 365 }
366 366
367 of_node_put(codec);
368
369 /* get convert-xxx property */ 367 /* get convert-xxx property */
370 memset(&adata, 0, sizeof(adata)); 368 memset(&adata, 0, sizeof(adata));
371 for_each_child_of_node(node, np) 369 for_each_child_of_node(node, np)
@@ -387,11 +385,13 @@ static int simple_for_each_link(struct asoc_simple_priv *priv,
387 ret = func_noml(priv, np, codec, li, is_top); 385 ret = func_noml(priv, np, codec, li, is_top);
388 386
389 if (ret < 0) { 387 if (ret < 0) {
388 of_node_put(codec);
390 of_node_put(np); 389 of_node_put(np);
391 goto error; 390 goto error;
392 } 391 }
393 } 392 }
394 393
394 of_node_put(codec);
395 node = of_get_next_child(top, node); 395 node = of_get_next_child(top, node);
396 } while (!is_top && node); 396 } while (!is_top && node);
397 397
diff --git a/sound/soc/intel/boards/bytcht_es8316.c b/sound/soc/intel/boards/bytcht_es8316.c
index fac09be3cade..46612331f5ea 100644
--- a/sound/soc/intel/boards/bytcht_es8316.c
+++ b/sound/soc/intel/boards/bytcht_es8316.c
@@ -437,6 +437,14 @@ static const struct acpi_gpio_mapping byt_cht_es8316_gpios[] = {
437 437
438/* Please keep this list alphabetically sorted */ 438/* Please keep this list alphabetically sorted */
439static const struct dmi_system_id byt_cht_es8316_quirk_table[] = { 439static const struct dmi_system_id byt_cht_es8316_quirk_table[] = {
440 { /* Irbis NB41 */
441 .matches = {
442 DMI_MATCH(DMI_SYS_VENDOR, "IRBIS"),
443 DMI_MATCH(DMI_PRODUCT_NAME, "NB41"),
444 },
445 .driver_data = (void *)(BYT_CHT_ES8316_INTMIC_IN2_MAP
446 | BYT_CHT_ES8316_JD_INVERTED),
447 },
440 { /* Teclast X98 Plus II */ 448 { /* Teclast X98 Plus II */
441 .matches = { 449 .matches = {
442 DMI_MATCH(DMI_SYS_VENDOR, "TECLAST"), 450 DMI_MATCH(DMI_SYS_VENDOR, "TECLAST"),
diff --git a/sound/soc/intel/common/soc-acpi-intel-bxt-match.c b/sound/soc/intel/common/soc-acpi-intel-bxt-match.c
index 229e39586868..4a5adae1d785 100644
--- a/sound/soc/intel/common/soc-acpi-intel-bxt-match.c
+++ b/sound/soc/intel/common/soc-acpi-intel-bxt-match.c
@@ -1,6 +1,6 @@
1// SPDX-License-Identifier: GPL-2.0 1// SPDX-License-Identifier: GPL-2.0
2/* 2/*
3 * soc-apci-intel-bxt-match.c - tables and support for BXT ACPI enumeration. 3 * soc-acpi-intel-bxt-match.c - tables and support for BXT ACPI enumeration.
4 * 4 *
5 * Copyright (c) 2018, Intel Corporation. 5 * Copyright (c) 2018, Intel Corporation.
6 * 6 *
diff --git a/sound/soc/intel/common/soc-acpi-intel-byt-match.c b/sound/soc/intel/common/soc-acpi-intel-byt-match.c
index b94b482ac34f..1cc801ba92eb 100644
--- a/sound/soc/intel/common/soc-acpi-intel-byt-match.c
+++ b/sound/soc/intel/common/soc-acpi-intel-byt-match.c
@@ -1,6 +1,6 @@
1// SPDX-License-Identifier: GPL-2.0-only 1// SPDX-License-Identifier: GPL-2.0-only
2/* 2/*
3 * soc-apci-intel-byt-match.c - tables and support for BYT ACPI enumeration. 3 * soc-acpi-intel-byt-match.c - tables and support for BYT ACPI enumeration.
4 * 4 *
5 * Copyright (c) 2017, Intel Corporation. 5 * Copyright (c) 2017, Intel Corporation.
6 */ 6 */
diff --git a/sound/soc/intel/common/soc-acpi-intel-cht-match.c b/sound/soc/intel/common/soc-acpi-intel-cht-match.c
index b7f11f6be1cf..d0fb43c2b9f6 100644
--- a/sound/soc/intel/common/soc-acpi-intel-cht-match.c
+++ b/sound/soc/intel/common/soc-acpi-intel-cht-match.c
@@ -1,6 +1,6 @@
1// SPDX-License-Identifier: GPL-2.0-only 1// SPDX-License-Identifier: GPL-2.0-only
2/* 2/*
3 * soc-apci-intel-cht-match.c - tables and support for CHT ACPI enumeration. 3 * soc-acpi-intel-cht-match.c - tables and support for CHT ACPI enumeration.
4 * 4 *
5 * Copyright (c) 2017, Intel Corporation. 5 * Copyright (c) 2017, Intel Corporation.
6 */ 6 */
diff --git a/sound/soc/intel/common/soc-acpi-intel-cnl-match.c b/sound/soc/intel/common/soc-acpi-intel-cnl-match.c
index c36c0aa4f683..771b0ef21051 100644
--- a/sound/soc/intel/common/soc-acpi-intel-cnl-match.c
+++ b/sound/soc/intel/common/soc-acpi-intel-cnl-match.c
@@ -1,6 +1,6 @@
1// SPDX-License-Identifier: GPL-2.0 1// SPDX-License-Identifier: GPL-2.0
2/* 2/*
3 * soc-apci-intel-cnl-match.c - tables and support for CNL ACPI enumeration. 3 * soc-acpi-intel-cnl-match.c - tables and support for CNL ACPI enumeration.
4 * 4 *
5 * Copyright (c) 2018, Intel Corporation. 5 * Copyright (c) 2018, Intel Corporation.
6 * 6 *
diff --git a/sound/soc/intel/common/soc-acpi-intel-glk-match.c b/sound/soc/intel/common/soc-acpi-intel-glk-match.c
index 616eb09e78a0..60dea358fa04 100644
--- a/sound/soc/intel/common/soc-acpi-intel-glk-match.c
+++ b/sound/soc/intel/common/soc-acpi-intel-glk-match.c
@@ -1,6 +1,6 @@
1// SPDX-License-Identifier: GPL-2.0 1// SPDX-License-Identifier: GPL-2.0
2/* 2/*
3 * soc-apci-intel-glk-match.c - tables and support for GLK ACPI enumeration. 3 * soc-acpi-intel-glk-match.c - tables and support for GLK ACPI enumeration.
4 * 4 *
5 * Copyright (c) 2018, Intel Corporation. 5 * Copyright (c) 2018, Intel Corporation.
6 * 6 *
diff --git a/sound/soc/intel/common/soc-acpi-intel-hda-match.c b/sound/soc/intel/common/soc-acpi-intel-hda-match.c
index 68ae43f7b4b2..cc972d2ac691 100644
--- a/sound/soc/intel/common/soc-acpi-intel-hda-match.c
+++ b/sound/soc/intel/common/soc-acpi-intel-hda-match.c
@@ -2,7 +2,7 @@
2// Copyright (c) 2018, Intel Corporation. 2// Copyright (c) 2018, Intel Corporation.
3 3
4/* 4/*
5 * soc-apci-intel-hda-match.c - tables and support for HDA+ACPI enumeration. 5 * soc-acpi-intel-hda-match.c - tables and support for HDA+ACPI enumeration.
6 * 6 *
7 */ 7 */
8 8
diff --git a/sound/soc/intel/common/soc-acpi-intel-hsw-bdw-match.c b/sound/soc/intel/common/soc-acpi-intel-hsw-bdw-match.c
index d27853e7a369..34eb0baaa951 100644
--- a/sound/soc/intel/common/soc-acpi-intel-hsw-bdw-match.c
+++ b/sound/soc/intel/common/soc-acpi-intel-hsw-bdw-match.c
@@ -1,6 +1,6 @@
1// SPDX-License-Identifier: GPL-2.0-only 1// SPDX-License-Identifier: GPL-2.0-only
2/* 2/*
3 * soc-apci-intel-hsw-bdw-match.c - tables and support for ACPI enumeration. 3 * soc-acpi-intel-hsw-bdw-match.c - tables and support for ACPI enumeration.
4 * 4 *
5 * Copyright (c) 2017, Intel Corporation. 5 * Copyright (c) 2017, Intel Corporation.
6 */ 6 */
diff --git a/sound/soc/intel/common/soc-acpi-intel-icl-match.c b/sound/soc/intel/common/soc-acpi-intel-icl-match.c
index 0b430b9b3673..38977669b576 100644
--- a/sound/soc/intel/common/soc-acpi-intel-icl-match.c
+++ b/sound/soc/intel/common/soc-acpi-intel-icl-match.c
@@ -1,6 +1,6 @@
1// SPDX-License-Identifier: GPL-2.0 1// SPDX-License-Identifier: GPL-2.0
2/* 2/*
3 * soc-apci-intel-icl-match.c - tables and support for ICL ACPI enumeration. 3 * soc-acpi-intel-icl-match.c - tables and support for ICL ACPI enumeration.
4 * 4 *
5 * Copyright (c) 2018, Intel Corporation. 5 * Copyright (c) 2018, Intel Corporation.
6 * 6 *
diff --git a/sound/soc/intel/common/soc-acpi-intel-kbl-match.c b/sound/soc/intel/common/soc-acpi-intel-kbl-match.c
index 4b331058e807..e200baa11011 100644
--- a/sound/soc/intel/common/soc-acpi-intel-kbl-match.c
+++ b/sound/soc/intel/common/soc-acpi-intel-kbl-match.c
@@ -1,6 +1,6 @@
1// SPDX-License-Identifier: GPL-2.0 1// SPDX-License-Identifier: GPL-2.0
2/* 2/*
3 * soc-apci-intel-kbl-match.c - tables and support for KBL ACPI enumeration. 3 * soc-acpi-intel-kbl-match.c - tables and support for KBL ACPI enumeration.
4 * 4 *
5 * Copyright (c) 2018, Intel Corporation. 5 * Copyright (c) 2018, Intel Corporation.
6 * 6 *
diff --git a/sound/soc/intel/common/soc-acpi-intel-skl-match.c b/sound/soc/intel/common/soc-acpi-intel-skl-match.c
index 0c9c0edd35b3..42fa40a8d932 100644
--- a/sound/soc/intel/common/soc-acpi-intel-skl-match.c
+++ b/sound/soc/intel/common/soc-acpi-intel-skl-match.c
@@ -1,6 +1,6 @@
1// SPDX-License-Identifier: GPL-2.0 1// SPDX-License-Identifier: GPL-2.0
2/* 2/*
3 * soc-apci-intel-skl-match.c - tables and support for SKL ACPI enumeration. 3 * soc-acpi-intel-skl-match.c - tables and support for SKL ACPI enumeration.
4 * 4 *
5 * Copyright (c) 2018, Intel Corporation. 5 * Copyright (c) 2018, Intel Corporation.
6 * 6 *
diff --git a/sound/soc/qcom/apq8016_sbc.c b/sound/soc/qcom/apq8016_sbc.c
index f60a71990f66..ac75838bbfab 100644
--- a/sound/soc/qcom/apq8016_sbc.c
+++ b/sound/soc/qcom/apq8016_sbc.c
@@ -150,17 +150,17 @@ static struct apq8016_sbc_data *apq8016_sbc_parse_of(struct snd_soc_card *card)
150 150
151 link = data->dai_link; 151 link = data->dai_link;
152 152
153 dlc = devm_kzalloc(dev, 2 * sizeof(*dlc), GFP_KERNEL); 153 for_each_child_of_node(node, np) {
154 if (!dlc) 154 dlc = devm_kzalloc(dev, 2 * sizeof(*dlc), GFP_KERNEL);
155 return ERR_PTR(-ENOMEM); 155 if (!dlc)
156 return ERR_PTR(-ENOMEM);
156 157
157 link->cpus = &dlc[0]; 158 link->cpus = &dlc[0];
158 link->platforms = &dlc[1]; 159 link->platforms = &dlc[1];
159 160
160 link->num_cpus = 1; 161 link->num_cpus = 1;
161 link->num_platforms = 1; 162 link->num_platforms = 1;
162 163
163 for_each_child_of_node(node, np) {
164 cpu = of_get_child_by_name(np, "cpu"); 164 cpu = of_get_child_by_name(np, "cpu");
165 codec = of_get_child_by_name(np, "codec"); 165 codec = of_get_child_by_name(np, "codec");
166 166
diff --git a/sound/soc/rockchip/rockchip_i2s.c b/sound/soc/rockchip/rockchip_i2s.c
index 0a34d0eb8dba..88ebaf6e1880 100644
--- a/sound/soc/rockchip/rockchip_i2s.c
+++ b/sound/soc/rockchip/rockchip_i2s.c
@@ -326,7 +326,6 @@ static int rockchip_i2s_hw_params(struct snd_pcm_substream *substream,
326 val |= I2S_CHN_4; 326 val |= I2S_CHN_4;
327 break; 327 break;
328 case 2: 328 case 2:
329 case 1:
330 val |= I2S_CHN_2; 329 val |= I2S_CHN_2;
331 break; 330 break;
332 default: 331 default:
@@ -459,7 +458,7 @@ static struct snd_soc_dai_driver rockchip_i2s_dai = {
459 }, 458 },
460 .capture = { 459 .capture = {
461 .stream_name = "Capture", 460 .stream_name = "Capture",
462 .channels_min = 1, 461 .channels_min = 2,
463 .channels_max = 2, 462 .channels_max = 2,
464 .rates = SNDRV_PCM_RATE_8000_192000, 463 .rates = SNDRV_PCM_RATE_8000_192000,
465 .formats = (SNDRV_PCM_FMTBIT_S8 | 464 .formats = (SNDRV_PCM_FMTBIT_S8 |
@@ -659,7 +658,7 @@ static int rockchip_i2s_probe(struct platform_device *pdev)
659 } 658 }
660 659
661 if (!of_property_read_u32(node, "rockchip,capture-channels", &val)) { 660 if (!of_property_read_u32(node, "rockchip,capture-channels", &val)) {
662 if (val >= 1 && val <= 8) 661 if (val >= 2 && val <= 8)
663 soc_dai->capture.channels_max = val; 662 soc_dai->capture.channels_max = val;
664 } 663 }
665 664
diff --git a/sound/soc/rockchip/rockchip_max98090.c b/sound/soc/rockchip/rockchip_max98090.c
index c5fc24675a33..782e534d4c0d 100644
--- a/sound/soc/rockchip/rockchip_max98090.c
+++ b/sound/soc/rockchip/rockchip_max98090.c
@@ -61,6 +61,37 @@ static const struct snd_kcontrol_new rk_mc_controls[] = {
61 SOC_DAPM_PIN_SWITCH("Speaker"), 61 SOC_DAPM_PIN_SWITCH("Speaker"),
62}; 62};
63 63
64static int rk_jack_event(struct notifier_block *nb, unsigned long event,
65 void *data)
66{
67 struct snd_soc_jack *jack = (struct snd_soc_jack *)data;
68 struct snd_soc_dapm_context *dapm = &jack->card->dapm;
69
70 if (event & SND_JACK_MICROPHONE)
71 snd_soc_dapm_force_enable_pin(dapm, "MICBIAS");
72 else
73 snd_soc_dapm_disable_pin(dapm, "MICBIAS");
74
75 snd_soc_dapm_sync(dapm);
76
77 return 0;
78}
79
80static struct notifier_block rk_jack_nb = {
81 .notifier_call = rk_jack_event,
82};
83
84static int rk_init(struct snd_soc_pcm_runtime *runtime)
85{
86 /*
87 * The jack has already been created in the rk_98090_headset_init()
88 * function.
89 */
90 snd_soc_jack_notifier_register(&headset_jack, &rk_jack_nb);
91
92 return 0;
93}
94
64static int rk_aif1_hw_params(struct snd_pcm_substream *substream, 95static int rk_aif1_hw_params(struct snd_pcm_substream *substream,
65 struct snd_pcm_hw_params *params) 96 struct snd_pcm_hw_params *params)
66{ 97{
@@ -119,6 +150,7 @@ SND_SOC_DAILINK_DEFS(hifi,
119static struct snd_soc_dai_link rk_dailink = { 150static struct snd_soc_dai_link rk_dailink = {
120 .name = "max98090", 151 .name = "max98090",
121 .stream_name = "Audio", 152 .stream_name = "Audio",
153 .init = rk_init,
122 .ops = &rk_aif1_ops, 154 .ops = &rk_aif1_ops,
123 /* set max98090 as slave */ 155 /* set max98090 as slave */
124 .dai_fmt = SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_NF | 156 .dai_fmt = SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_NF |
diff --git a/sound/soc/samsung/odroid.c b/sound/soc/samsung/odroid.c
index dfb6e460e7eb..f0f5fa9c27d3 100644
--- a/sound/soc/samsung/odroid.c
+++ b/sound/soc/samsung/odroid.c
@@ -284,9 +284,8 @@ static int odroid_audio_probe(struct platform_device *pdev)
284 } 284 }
285 285
286 of_node_put(cpu); 286 of_node_put(cpu);
287 of_node_put(codec);
288 if (ret < 0) 287 if (ret < 0)
289 return ret; 288 goto err_put_node;
290 289
291 ret = snd_soc_of_get_dai_link_codecs(dev, codec, codec_link); 290 ret = snd_soc_of_get_dai_link_codecs(dev, codec, codec_link);
292 if (ret < 0) 291 if (ret < 0)
@@ -309,7 +308,6 @@ static int odroid_audio_probe(struct platform_device *pdev)
309 ret = PTR_ERR(priv->clk_i2s_bus); 308 ret = PTR_ERR(priv->clk_i2s_bus);
310 goto err_put_sclk; 309 goto err_put_sclk;
311 } 310 }
312 of_node_put(cpu_dai);
313 311
314 ret = devm_snd_soc_register_card(dev, card); 312 ret = devm_snd_soc_register_card(dev, card);
315 if (ret < 0) { 313 if (ret < 0) {
@@ -317,6 +315,8 @@ static int odroid_audio_probe(struct platform_device *pdev)
317 goto err_put_clk_i2s; 315 goto err_put_clk_i2s;
318 } 316 }
319 317
318 of_node_put(cpu_dai);
319 of_node_put(codec);
320 return 0; 320 return 0;
321 321
322err_put_clk_i2s: 322err_put_clk_i2s:
@@ -326,6 +326,8 @@ err_put_sclk:
326err_put_cpu_dai: 326err_put_cpu_dai:
327 of_node_put(cpu_dai); 327 of_node_put(cpu_dai);
328 snd_soc_of_put_dai_link_codecs(codec_link); 328 snd_soc_of_put_dai_link_codecs(codec_link);
329err_put_node:
330 of_node_put(codec);
329 return ret; 331 return ret;
330} 332}
331 333
diff --git a/sound/soc/soc-core.c b/sound/soc/soc-core.c
index fd6eaae6c0ed..44f899b970c2 100644
--- a/sound/soc/soc-core.c
+++ b/sound/soc/soc-core.c
@@ -1515,8 +1515,11 @@ static int soc_probe_link_dais(struct snd_soc_card *card,
1515 } 1515 }
1516 } 1516 }
1517 1517
1518 if (dai_link->dai_fmt) 1518 if (dai_link->dai_fmt) {
1519 snd_soc_runtime_set_dai_fmt(rtd, dai_link->dai_fmt); 1519 ret = snd_soc_runtime_set_dai_fmt(rtd, dai_link->dai_fmt);
1520 if (ret)
1521 return ret;
1522 }
1520 1523
1521 ret = soc_post_component_init(rtd, dai_link->name); 1524 ret = soc_post_component_init(rtd, dai_link->name);
1522 if (ret) 1525 if (ret)
diff --git a/sound/soc/soc-dapm.c b/sound/soc/soc-dapm.c
index f013b24c050a..2790c00735f3 100644
--- a/sound/soc/soc-dapm.c
+++ b/sound/soc/soc-dapm.c
@@ -1157,8 +1157,8 @@ static __always_inline int is_connected_ep(struct snd_soc_dapm_widget *widget,
1157 list_add_tail(&widget->work_list, list); 1157 list_add_tail(&widget->work_list, list);
1158 1158
1159 if (custom_stop_condition && custom_stop_condition(widget, dir)) { 1159 if (custom_stop_condition && custom_stop_condition(widget, dir)) {
1160 widget->endpoints[dir] = 1; 1160 list = NULL;
1161 return widget->endpoints[dir]; 1161 custom_stop_condition = NULL;
1162 } 1162 }
1163 1163
1164 if ((widget->is_ep & SND_SOC_DAPM_DIR_TO_EP(dir)) && widget->connected) { 1164 if ((widget->is_ep & SND_SOC_DAPM_DIR_TO_EP(dir)) && widget->connected) {
@@ -1195,8 +1195,8 @@ static __always_inline int is_connected_ep(struct snd_soc_dapm_widget *widget,
1195 * 1195 *
1196 * Optionally, can be supplied with a function acting as a stopping condition. 1196 * Optionally, can be supplied with a function acting as a stopping condition.
1197 * This function takes the dapm widget currently being examined and the walk 1197 * This function takes the dapm widget currently being examined and the walk
1198 * direction as an arguments, it should return true if the walk should be 1198 * direction as an arguments, it should return true if widgets from that point
1199 * stopped and false otherwise. 1199 * in the graph onwards should not be added to the widget list.
1200 */ 1200 */
1201static int is_connected_output_ep(struct snd_soc_dapm_widget *widget, 1201static int is_connected_output_ep(struct snd_soc_dapm_widget *widget,
1202 struct list_head *list, 1202 struct list_head *list,
@@ -3706,6 +3706,8 @@ request_failed:
3706 dev_err(dapm->dev, "ASoC: Failed to request %s: %d\n", 3706 dev_err(dapm->dev, "ASoC: Failed to request %s: %d\n",
3707 w->name, ret); 3707 w->name, ret);
3708 3708
3709 kfree_const(w->sname);
3710 kfree(w);
3709 return ERR_PTR(ret); 3711 return ERR_PTR(ret);
3710} 3712}
3711 3713
diff --git a/sound/soc/sof/intel/cnl.c b/sound/soc/sof/intel/cnl.c
index f2b392998f20..ffd8d4394537 100644
--- a/sound/soc/sof/intel/cnl.c
+++ b/sound/soc/sof/intel/cnl.c
@@ -101,8 +101,8 @@ static irqreturn_t cnl_ipc_irq_thread(int irq, void *context)
101 /* 101 /*
102 * This interrupt is not shared so no need to return IRQ_NONE. 102 * This interrupt is not shared so no need to return IRQ_NONE.
103 */ 103 */
104 dev_err_ratelimited(sdev->dev, 104 dev_dbg_ratelimited(sdev->dev,
105 "error: nothing to do in IRQ thread\n"); 105 "nothing to do in IPC IRQ thread\n");
106 } 106 }
107 107
108 /* re-enable IPC interrupt */ 108 /* re-enable IPC interrupt */
diff --git a/sound/soc/sof/intel/hda-ipc.c b/sound/soc/sof/intel/hda-ipc.c
index 50244b82600c..2ecba91f5219 100644
--- a/sound/soc/sof/intel/hda-ipc.c
+++ b/sound/soc/sof/intel/hda-ipc.c
@@ -224,8 +224,8 @@ irqreturn_t hda_dsp_ipc_irq_thread(int irq, void *context)
224 /* 224 /*
225 * This interrupt is not shared so no need to return IRQ_NONE. 225 * This interrupt is not shared so no need to return IRQ_NONE.
226 */ 226 */
227 dev_err_ratelimited(sdev->dev, 227 dev_dbg_ratelimited(sdev->dev,
228 "error: nothing to do in IRQ thread\n"); 228 "nothing to do in IPC IRQ thread\n");
229 } 229 }
230 230
231 /* re-enable IPC interrupt */ 231 /* re-enable IPC interrupt */
diff --git a/sound/soc/sunxi/sun4i-i2s.c b/sound/soc/sunxi/sun4i-i2s.c
index 9b2232908b65..7fa5c61169db 100644
--- a/sound/soc/sunxi/sun4i-i2s.c
+++ b/sound/soc/sunxi/sun4i-i2s.c
@@ -1002,8 +1002,8 @@ static const struct sun4i_i2s_quirks sun50i_a64_codec_i2s_quirks = {
1002 .field_rxchanmap = REG_FIELD(SUN4I_I2S_RX_CHAN_MAP_REG, 0, 31), 1002 .field_rxchanmap = REG_FIELD(SUN4I_I2S_RX_CHAN_MAP_REG, 0, 31),
1003 .field_txchansel = REG_FIELD(SUN4I_I2S_TX_CHAN_SEL_REG, 0, 2), 1003 .field_txchansel = REG_FIELD(SUN4I_I2S_TX_CHAN_SEL_REG, 0, 2),
1004 .field_rxchansel = REG_FIELD(SUN4I_I2S_RX_CHAN_SEL_REG, 0, 2), 1004 .field_rxchansel = REG_FIELD(SUN4I_I2S_RX_CHAN_SEL_REG, 0, 2),
1005 .get_sr = sun8i_i2s_get_sr_wss, 1005 .get_sr = sun4i_i2s_get_sr,
1006 .get_wss = sun8i_i2s_get_sr_wss, 1006 .get_wss = sun4i_i2s_get_wss,
1007}; 1007};
1008 1008
1009static int sun4i_i2s_init_regmap_fields(struct device *dev, 1009static int sun4i_i2s_init_regmap_fields(struct device *dev,
diff --git a/sound/soc/ti/davinci-mcasp.c b/sound/soc/ti/davinci-mcasp.c
index ac59b509ead5..bc7bf15ed7a4 100644
--- a/sound/soc/ti/davinci-mcasp.c
+++ b/sound/soc/ti/davinci-mcasp.c
@@ -195,7 +195,7 @@ static inline void mcasp_set_axr_pdir(struct davinci_mcasp *mcasp, bool enable)
195{ 195{
196 u32 bit; 196 u32 bit;
197 197
198 for_each_set_bit(bit, &mcasp->pdir, PIN_BIT_AFSR) { 198 for_each_set_bit(bit, &mcasp->pdir, PIN_BIT_AMUTE) {
199 if (enable) 199 if (enable)
200 mcasp_set_bits(mcasp, DAVINCI_MCASP_PDIR_REG, BIT(bit)); 200 mcasp_set_bits(mcasp, DAVINCI_MCASP_PDIR_REG, BIT(bit));
201 else 201 else
@@ -223,6 +223,7 @@ static void mcasp_start_rx(struct davinci_mcasp *mcasp)
223 if (mcasp_is_synchronous(mcasp)) { 223 if (mcasp_is_synchronous(mcasp)) {
224 mcasp_set_ctl_reg(mcasp, DAVINCI_MCASP_GBLCTLX_REG, TXHCLKRST); 224 mcasp_set_ctl_reg(mcasp, DAVINCI_MCASP_GBLCTLX_REG, TXHCLKRST);
225 mcasp_set_ctl_reg(mcasp, DAVINCI_MCASP_GBLCTLX_REG, TXCLKRST); 225 mcasp_set_ctl_reg(mcasp, DAVINCI_MCASP_GBLCTLX_REG, TXCLKRST);
226 mcasp_set_clk_pdir(mcasp, true);
226 } 227 }
227 228
228 /* Activate serializer(s) */ 229 /* Activate serializer(s) */
@@ -1256,6 +1257,28 @@ static int davinci_mcasp_trigger(struct snd_pcm_substream *substream,
1256 return ret; 1257 return ret;
1257} 1258}
1258 1259
1260static int davinci_mcasp_hw_rule_slot_width(struct snd_pcm_hw_params *params,
1261 struct snd_pcm_hw_rule *rule)
1262{
1263 struct davinci_mcasp_ruledata *rd = rule->private;
1264 struct snd_mask *fmt = hw_param_mask(params, SNDRV_PCM_HW_PARAM_FORMAT);
1265 struct snd_mask nfmt;
1266 int i, slot_width;
1267
1268 snd_mask_none(&nfmt);
1269 slot_width = rd->mcasp->slot_width;
1270
1271 for (i = 0; i <= SNDRV_PCM_FORMAT_LAST; i++) {
1272 if (snd_mask_test(fmt, i)) {
1273 if (snd_pcm_format_width(i) <= slot_width) {
1274 snd_mask_set(&nfmt, i);
1275 }
1276 }
1277 }
1278
1279 return snd_mask_refine(fmt, &nfmt);
1280}
1281
1259static const unsigned int davinci_mcasp_dai_rates[] = { 1282static const unsigned int davinci_mcasp_dai_rates[] = {
1260 8000, 11025, 16000, 22050, 32000, 44100, 48000, 64000, 1283 8000, 11025, 16000, 22050, 32000, 44100, 48000, 64000,
1261 88200, 96000, 176400, 192000, 1284 88200, 96000, 176400, 192000,
@@ -1377,7 +1400,7 @@ static int davinci_mcasp_startup(struct snd_pcm_substream *substream,
1377 struct davinci_mcasp_ruledata *ruledata = 1400 struct davinci_mcasp_ruledata *ruledata =
1378 &mcasp->ruledata[substream->stream]; 1401 &mcasp->ruledata[substream->stream];
1379 u32 max_channels = 0; 1402 u32 max_channels = 0;
1380 int i, dir; 1403 int i, dir, ret;
1381 int tdm_slots = mcasp->tdm_slots; 1404 int tdm_slots = mcasp->tdm_slots;
1382 1405
1383 /* Do not allow more then one stream per direction */ 1406 /* Do not allow more then one stream per direction */
@@ -1406,6 +1429,7 @@ static int davinci_mcasp_startup(struct snd_pcm_substream *substream,
1406 max_channels++; 1429 max_channels++;
1407 } 1430 }
1408 ruledata->serializers = max_channels; 1431 ruledata->serializers = max_channels;
1432 ruledata->mcasp = mcasp;
1409 max_channels *= tdm_slots; 1433 max_channels *= tdm_slots;
1410 /* 1434 /*
1411 * If the already active stream has less channels than the calculated 1435 * If the already active stream has less channels than the calculated
@@ -1431,20 +1455,22 @@ static int davinci_mcasp_startup(struct snd_pcm_substream *substream,
1431 0, SNDRV_PCM_HW_PARAM_CHANNELS, 1455 0, SNDRV_PCM_HW_PARAM_CHANNELS,
1432 &mcasp->chconstr[substream->stream]); 1456 &mcasp->chconstr[substream->stream]);
1433 1457
1434 if (mcasp->slot_width) 1458 if (mcasp->slot_width) {
1435 snd_pcm_hw_constraint_minmax(substream->runtime, 1459 /* Only allow formats require <= slot_width bits on the bus */
1436 SNDRV_PCM_HW_PARAM_SAMPLE_BITS, 1460 ret = snd_pcm_hw_rule_add(substream->runtime, 0,
1437 8, mcasp->slot_width); 1461 SNDRV_PCM_HW_PARAM_FORMAT,
1462 davinci_mcasp_hw_rule_slot_width,
1463 ruledata,
1464 SNDRV_PCM_HW_PARAM_FORMAT, -1);
1465 if (ret)
1466 return ret;
1467 }
1438 1468
1439 /* 1469 /*
1440 * If we rely on implicit BCLK divider setting we should 1470 * If we rely on implicit BCLK divider setting we should
1441 * set constraints based on what we can provide. 1471 * set constraints based on what we can provide.
1442 */ 1472 */
1443 if (mcasp->bclk_master && mcasp->bclk_div == 0 && mcasp->sysclk_freq) { 1473 if (mcasp->bclk_master && mcasp->bclk_div == 0 && mcasp->sysclk_freq) {
1444 int ret;
1445
1446 ruledata->mcasp = mcasp;
1447
1448 ret = snd_pcm_hw_rule_add(substream->runtime, 0, 1474 ret = snd_pcm_hw_rule_add(substream->runtime, 0,
1449 SNDRV_PCM_HW_PARAM_RATE, 1475 SNDRV_PCM_HW_PARAM_RATE,
1450 davinci_mcasp_hw_rule_rate, 1476 davinci_mcasp_hw_rule_rate,
diff --git a/sound/sound_core.c b/sound/sound_core.c
index b730d97c4de6..90d118cd9164 100644
--- a/sound/sound_core.c
+++ b/sound/sound_core.c
@@ -275,7 +275,8 @@ retry:
275 goto retry; 275 goto retry;
276 } 276 }
277 spin_unlock(&sound_loader_lock); 277 spin_unlock(&sound_loader_lock);
278 return -EBUSY; 278 r = -EBUSY;
279 goto fail;
279 } 280 }
280 } 281 }
281 282
diff --git a/sound/usb/helper.c b/sound/usb/helper.c
index 71d5f540334a..4c12cc5b53fd 100644
--- a/sound/usb/helper.c
+++ b/sound/usb/helper.c
@@ -72,7 +72,7 @@ int snd_usb_pipe_sanity_check(struct usb_device *dev, unsigned int pipe)
72 struct usb_host_endpoint *ep; 72 struct usb_host_endpoint *ep;
73 73
74 ep = usb_pipe_endpoint(dev, pipe); 74 ep = usb_pipe_endpoint(dev, pipe);
75 if (usb_pipetype(pipe) != pipetypes[usb_endpoint_type(&ep->desc)]) 75 if (!ep || usb_pipetype(pipe) != pipetypes[usb_endpoint_type(&ep->desc)])
76 return -EINVAL; 76 return -EINVAL;
77 return 0; 77 return 0;
78} 78}
diff --git a/sound/usb/hiface/pcm.c b/sound/usb/hiface/pcm.c
index 14fc1e1d5d13..c406497c5919 100644
--- a/sound/usb/hiface/pcm.c
+++ b/sound/usb/hiface/pcm.c
@@ -600,14 +600,13 @@ int hiface_pcm_init(struct hiface_chip *chip, u8 extra_freq)
600 ret = hiface_pcm_init_urb(&rt->out_urbs[i], chip, OUT_EP, 600 ret = hiface_pcm_init_urb(&rt->out_urbs[i], chip, OUT_EP,
601 hiface_pcm_out_urb_handler); 601 hiface_pcm_out_urb_handler);
602 if (ret < 0) 602 if (ret < 0)
603 return ret; 603 goto error;
604 } 604 }
605 605
606 ret = snd_pcm_new(chip->card, "USB-SPDIF Audio", 0, 1, 0, &pcm); 606 ret = snd_pcm_new(chip->card, "USB-SPDIF Audio", 0, 1, 0, &pcm);
607 if (ret < 0) { 607 if (ret < 0) {
608 kfree(rt);
609 dev_err(&chip->dev->dev, "Cannot create pcm instance\n"); 608 dev_err(&chip->dev->dev, "Cannot create pcm instance\n");
610 return ret; 609 goto error;
611 } 610 }
612 611
613 pcm->private_data = rt; 612 pcm->private_data = rt;
@@ -620,4 +619,10 @@ int hiface_pcm_init(struct hiface_chip *chip, u8 extra_freq)
620 619
621 chip->pcm = rt; 620 chip->pcm = rt;
622 return 0; 621 return 0;
622
623error:
624 for (i = 0; i < PCM_N_URBS; i++)
625 kfree(rt->out_urbs[i].buffer);
626 kfree(rt);
627 return ret;
623} 628}
diff --git a/sound/usb/line6/pcm.c b/sound/usb/line6/pcm.c
index 2c03e0f6bf72..f70211e6b174 100644
--- a/sound/usb/line6/pcm.c
+++ b/sound/usb/line6/pcm.c
@@ -550,6 +550,15 @@ int line6_init_pcm(struct usb_line6 *line6,
550 line6pcm->volume_monitor = 255; 550 line6pcm->volume_monitor = 255;
551 line6pcm->line6 = line6; 551 line6pcm->line6 = line6;
552 552
553 spin_lock_init(&line6pcm->out.lock);
554 spin_lock_init(&line6pcm->in.lock);
555 line6pcm->impulse_period = LINE6_IMPULSE_DEFAULT_PERIOD;
556
557 line6->line6pcm = line6pcm;
558
559 pcm->private_data = line6pcm;
560 pcm->private_free = line6_cleanup_pcm;
561
553 line6pcm->max_packet_size_in = 562 line6pcm->max_packet_size_in =
554 usb_maxpacket(line6->usbdev, 563 usb_maxpacket(line6->usbdev,
555 usb_rcvisocpipe(line6->usbdev, ep_read), 0); 564 usb_rcvisocpipe(line6->usbdev, ep_read), 0);
@@ -562,15 +571,6 @@ int line6_init_pcm(struct usb_line6 *line6,
562 return -EINVAL; 571 return -EINVAL;
563 } 572 }
564 573
565 spin_lock_init(&line6pcm->out.lock);
566 spin_lock_init(&line6pcm->in.lock);
567 line6pcm->impulse_period = LINE6_IMPULSE_DEFAULT_PERIOD;
568
569 line6->line6pcm = line6pcm;
570
571 pcm->private_data = line6pcm;
572 pcm->private_free = line6_cleanup_pcm;
573
574 err = line6_create_audio_out_urbs(line6pcm); 574 err = line6_create_audio_out_urbs(line6pcm);
575 if (err < 0) 575 if (err < 0)
576 return err; 576 return err;
diff --git a/sound/usb/line6/podhd.c b/sound/usb/line6/podhd.c
index f0662bd4e50f..27bf61c177c0 100644
--- a/sound/usb/line6/podhd.c
+++ b/sound/usb/line6/podhd.c
@@ -368,7 +368,7 @@ static const struct line6_properties podhd_properties_table[] = {
368 .name = "POD HD500", 368 .name = "POD HD500",
369 .capabilities = LINE6_CAP_PCM 369 .capabilities = LINE6_CAP_PCM
370 | LINE6_CAP_HWMON, 370 | LINE6_CAP_HWMON,
371 .altsetting = 1, 371 .altsetting = 0,
372 .ep_ctrl_r = 0x81, 372 .ep_ctrl_r = 0x81,
373 .ep_ctrl_w = 0x01, 373 .ep_ctrl_w = 0x01,
374 .ep_audio_r = 0x86, 374 .ep_audio_r = 0x86,
diff --git a/sound/usb/line6/variax.c b/sound/usb/line6/variax.c
index 0d24c72c155f..ed158f04de80 100644
--- a/sound/usb/line6/variax.c
+++ b/sound/usb/line6/variax.c
@@ -244,5 +244,5 @@ static struct usb_driver variax_driver = {
244 244
245module_usb_driver(variax_driver); 245module_usb_driver(variax_driver);
246 246
247MODULE_DESCRIPTION("Vairax Workbench USB driver"); 247MODULE_DESCRIPTION("Variax Workbench USB driver");
248MODULE_LICENSE("GPL"); 248MODULE_LICENSE("GPL");
diff --git a/sound/usb/mixer.c b/sound/usb/mixer.c
index 7498b5191b68..eceab19766db 100644
--- a/sound/usb/mixer.c
+++ b/sound/usb/mixer.c
@@ -68,6 +68,7 @@ struct mixer_build {
68 unsigned char *buffer; 68 unsigned char *buffer;
69 unsigned int buflen; 69 unsigned int buflen;
70 DECLARE_BITMAP(unitbitmap, MAX_ID_ELEMS); 70 DECLARE_BITMAP(unitbitmap, MAX_ID_ELEMS);
71 DECLARE_BITMAP(termbitmap, MAX_ID_ELEMS);
71 struct usb_audio_term oterm; 72 struct usb_audio_term oterm;
72 const struct usbmix_name_map *map; 73 const struct usbmix_name_map *map;
73 const struct usbmix_selector_map *selector_map; 74 const struct usbmix_selector_map *selector_map;
@@ -738,12 +739,13 @@ static int uac_mixer_unit_get_channels(struct mixer_build *state,
738 struct uac_mixer_unit_descriptor *desc) 739 struct uac_mixer_unit_descriptor *desc)
739{ 740{
740 int mu_channels; 741 int mu_channels;
741 void *c;
742 742
743 if (desc->bLength < sizeof(*desc)) 743 if (desc->bLength < sizeof(*desc))
744 return -EINVAL; 744 return -EINVAL;
745 if (!desc->bNrInPins) 745 if (!desc->bNrInPins)
746 return -EINVAL; 746 return -EINVAL;
747 if (desc->bLength < sizeof(*desc) + desc->bNrInPins)
748 return -EINVAL;
747 749
748 switch (state->mixer->protocol) { 750 switch (state->mixer->protocol) {
749 case UAC_VERSION_1: 751 case UAC_VERSION_1:
@@ -759,13 +761,6 @@ static int uac_mixer_unit_get_channels(struct mixer_build *state,
759 break; 761 break;
760 } 762 }
761 763
762 if (!mu_channels)
763 return 0;
764
765 c = uac_mixer_unit_bmControls(desc, state->mixer->protocol);
766 if (c - (void *)desc + (mu_channels - 1) / 8 >= desc->bLength)
767 return 0; /* no bmControls -> skip */
768
769 return mu_channels; 764 return mu_channels;
770} 765}
771 766
@@ -773,16 +768,25 @@ static int uac_mixer_unit_get_channels(struct mixer_build *state,
773 * parse the source unit recursively until it reaches to a terminal 768 * parse the source unit recursively until it reaches to a terminal
774 * or a branched unit. 769 * or a branched unit.
775 */ 770 */
776static int check_input_term(struct mixer_build *state, int id, 771static int __check_input_term(struct mixer_build *state, int id,
777 struct usb_audio_term *term) 772 struct usb_audio_term *term)
778{ 773{
779 int protocol = state->mixer->protocol; 774 int protocol = state->mixer->protocol;
780 int err; 775 int err;
781 void *p1; 776 void *p1;
777 unsigned char *hdr;
782 778
783 memset(term, 0, sizeof(*term)); 779 memset(term, 0, sizeof(*term));
784 while ((p1 = find_audio_control_unit(state, id)) != NULL) { 780 for (;;) {
785 unsigned char *hdr = p1; 781 /* a loop in the terminal chain? */
782 if (test_and_set_bit(id, state->termbitmap))
783 return -EINVAL;
784
785 p1 = find_audio_control_unit(state, id);
786 if (!p1)
787 break;
788
789 hdr = p1;
786 term->id = id; 790 term->id = id;
787 791
788 if (protocol == UAC_VERSION_1 || protocol == UAC_VERSION_2) { 792 if (protocol == UAC_VERSION_1 || protocol == UAC_VERSION_2) {
@@ -800,7 +804,7 @@ static int check_input_term(struct mixer_build *state, int id,
800 804
801 /* call recursively to verify that the 805 /* call recursively to verify that the
802 * referenced clock entity is valid */ 806 * referenced clock entity is valid */
803 err = check_input_term(state, d->bCSourceID, term); 807 err = __check_input_term(state, d->bCSourceID, term);
804 if (err < 0) 808 if (err < 0)
805 return err; 809 return err;
806 810
@@ -834,7 +838,7 @@ static int check_input_term(struct mixer_build *state, int id,
834 case UAC2_CLOCK_SELECTOR: { 838 case UAC2_CLOCK_SELECTOR: {
835 struct uac_selector_unit_descriptor *d = p1; 839 struct uac_selector_unit_descriptor *d = p1;
836 /* call recursively to retrieve the channel info */ 840 /* call recursively to retrieve the channel info */
837 err = check_input_term(state, d->baSourceID[0], term); 841 err = __check_input_term(state, d->baSourceID[0], term);
838 if (err < 0) 842 if (err < 0)
839 return err; 843 return err;
840 term->type = UAC3_SELECTOR_UNIT << 16; /* virtual type */ 844 term->type = UAC3_SELECTOR_UNIT << 16; /* virtual type */
@@ -897,7 +901,7 @@ static int check_input_term(struct mixer_build *state, int id,
897 901
898 /* call recursively to verify that the 902 /* call recursively to verify that the
899 * referenced clock entity is valid */ 903 * referenced clock entity is valid */
900 err = check_input_term(state, d->bCSourceID, term); 904 err = __check_input_term(state, d->bCSourceID, term);
901 if (err < 0) 905 if (err < 0)
902 return err; 906 return err;
903 907
@@ -948,7 +952,7 @@ static int check_input_term(struct mixer_build *state, int id,
948 case UAC3_CLOCK_SELECTOR: { 952 case UAC3_CLOCK_SELECTOR: {
949 struct uac_selector_unit_descriptor *d = p1; 953 struct uac_selector_unit_descriptor *d = p1;
950 /* call recursively to retrieve the channel info */ 954 /* call recursively to retrieve the channel info */
951 err = check_input_term(state, d->baSourceID[0], term); 955 err = __check_input_term(state, d->baSourceID[0], term);
952 if (err < 0) 956 if (err < 0)
953 return err; 957 return err;
954 term->type = UAC3_SELECTOR_UNIT << 16; /* virtual type */ 958 term->type = UAC3_SELECTOR_UNIT << 16; /* virtual type */
@@ -964,7 +968,7 @@ static int check_input_term(struct mixer_build *state, int id,
964 return -EINVAL; 968 return -EINVAL;
965 969
966 /* call recursively to retrieve the channel info */ 970 /* call recursively to retrieve the channel info */
967 err = check_input_term(state, d->baSourceID[0], term); 971 err = __check_input_term(state, d->baSourceID[0], term);
968 if (err < 0) 972 if (err < 0)
969 return err; 973 return err;
970 974
@@ -982,6 +986,15 @@ static int check_input_term(struct mixer_build *state, int id,
982 return -ENODEV; 986 return -ENODEV;
983} 987}
984 988
989
990static int check_input_term(struct mixer_build *state, int id,
991 struct usb_audio_term *term)
992{
993 memset(term, 0, sizeof(*term));
994 memset(state->termbitmap, 0, sizeof(state->termbitmap));
995 return __check_input_term(state, id, term);
996}
997
985/* 998/*
986 * Feature Unit 999 * Feature Unit
987 */ 1000 */
@@ -1988,6 +2001,31 @@ static int parse_audio_feature_unit(struct mixer_build *state, int unitid,
1988 * Mixer Unit 2001 * Mixer Unit
1989 */ 2002 */
1990 2003
2004/* check whether the given in/out overflows bmMixerControls matrix */
2005static bool mixer_bitmap_overflow(struct uac_mixer_unit_descriptor *desc,
2006 int protocol, int num_ins, int num_outs)
2007{
2008 u8 *hdr = (u8 *)desc;
2009 u8 *c = uac_mixer_unit_bmControls(desc, protocol);
2010 size_t rest; /* remaining bytes after bmMixerControls */
2011
2012 switch (protocol) {
2013 case UAC_VERSION_1:
2014 default:
2015 rest = 1; /* iMixer */
2016 break;
2017 case UAC_VERSION_2:
2018 rest = 2; /* bmControls + iMixer */
2019 break;
2020 case UAC_VERSION_3:
2021 rest = 6; /* bmControls + wMixerDescrStr */
2022 break;
2023 }
2024
2025 /* overflow? */
2026 return c + (num_ins * num_outs + 7) / 8 + rest > hdr + hdr[0];
2027}
2028
1991/* 2029/*
1992 * build a mixer unit control 2030 * build a mixer unit control
1993 * 2031 *
@@ -2116,6 +2154,9 @@ static int parse_audio_mixer_unit(struct mixer_build *state, int unitid,
2116 if (err < 0) 2154 if (err < 0)
2117 return err; 2155 return err;
2118 num_ins += iterm.channels; 2156 num_ins += iterm.channels;
2157 if (mixer_bitmap_overflow(desc, state->mixer->protocol,
2158 num_ins, num_outs))
2159 break;
2119 for (; ich < num_ins; ich++) { 2160 for (; ich < num_ins; ich++) {
2120 int och, ich_has_controls = 0; 2161 int och, ich_has_controls = 0;
2121 2162
diff --git a/sound/usb/mixer_quirks.c b/sound/usb/mixer_quirks.c
index 199fa157a411..27dcb3743690 100644
--- a/sound/usb/mixer_quirks.c
+++ b/sound/usb/mixer_quirks.c
@@ -1155,17 +1155,17 @@ void snd_emuusb_set_samplerate(struct snd_usb_audio *chip,
1155{ 1155{
1156 struct usb_mixer_interface *mixer; 1156 struct usb_mixer_interface *mixer;
1157 struct usb_mixer_elem_info *cval; 1157 struct usb_mixer_elem_info *cval;
1158 int unitid = 12; /* SamleRate ExtensionUnit ID */ 1158 int unitid = 12; /* SampleRate ExtensionUnit ID */
1159 1159
1160 list_for_each_entry(mixer, &chip->mixer_list, list) { 1160 list_for_each_entry(mixer, &chip->mixer_list, list) {
1161 cval = mixer_elem_list_to_info(mixer->id_elems[unitid]); 1161 if (mixer->id_elems[unitid]) {
1162 if (cval) { 1162 cval = mixer_elem_list_to_info(mixer->id_elems[unitid]);
1163 snd_usb_mixer_set_ctl_value(cval, UAC_SET_CUR, 1163 snd_usb_mixer_set_ctl_value(cval, UAC_SET_CUR,
1164 cval->control << 8, 1164 cval->control << 8,
1165 samplerate_id); 1165 samplerate_id);
1166 snd_usb_mixer_notify_id(mixer, unitid); 1166 snd_usb_mixer_notify_id(mixer, unitid);
1167 break;
1167 } 1168 }
1168 break;
1169 } 1169 }
1170} 1170}
1171 1171
diff --git a/sound/usb/pcm.c b/sound/usb/pcm.c
index 75b96929f76c..e4bbf79de956 100644
--- a/sound/usb/pcm.c
+++ b/sound/usb/pcm.c
@@ -339,6 +339,7 @@ static int set_sync_ep_implicit_fb_quirk(struct snd_usb_substream *subs,
339 ep = 0x81; 339 ep = 0x81;
340 ifnum = 2; 340 ifnum = 2;
341 goto add_sync_ep_from_ifnum; 341 goto add_sync_ep_from_ifnum;
342 case USB_ID(0x1397, 0x0001): /* Behringer UFX1604 */
342 case USB_ID(0x1397, 0x0002): /* Behringer UFX1204 */ 343 case USB_ID(0x1397, 0x0002): /* Behringer UFX1204 */
343 ep = 0x81; 344 ep = 0x81;
344 ifnum = 1; 345 ifnum = 1;
diff --git a/sound/usb/stream.c b/sound/usb/stream.c
index 7ee9d17d0143..e852c7fd6109 100644
--- a/sound/usb/stream.c
+++ b/sound/usb/stream.c
@@ -1043,6 +1043,7 @@ found_clock:
1043 1043
1044 pd = kzalloc(sizeof(*pd), GFP_KERNEL); 1044 pd = kzalloc(sizeof(*pd), GFP_KERNEL);
1045 if (!pd) { 1045 if (!pd) {
1046 kfree(fp->chmap);
1046 kfree(fp->rate_table); 1047 kfree(fp->rate_table);
1047 kfree(fp); 1048 kfree(fp);
1048 return NULL; 1049 return NULL;
diff --git a/tools/arch/arm/include/uapi/asm/kvm.h b/tools/arch/arm/include/uapi/asm/kvm.h
index 4602464ebdfb..a4217c1a5d01 100644
--- a/tools/arch/arm/include/uapi/asm/kvm.h
+++ b/tools/arch/arm/include/uapi/asm/kvm.h
@@ -214,6 +214,18 @@ struct kvm_vcpu_events {
214#define KVM_REG_ARM_FW_REG(r) (KVM_REG_ARM | KVM_REG_SIZE_U64 | \ 214#define KVM_REG_ARM_FW_REG(r) (KVM_REG_ARM | KVM_REG_SIZE_U64 | \
215 KVM_REG_ARM_FW | ((r) & 0xffff)) 215 KVM_REG_ARM_FW | ((r) & 0xffff))
216#define KVM_REG_ARM_PSCI_VERSION KVM_REG_ARM_FW_REG(0) 216#define KVM_REG_ARM_PSCI_VERSION KVM_REG_ARM_FW_REG(0)
217#define KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_1 KVM_REG_ARM_FW_REG(1)
218 /* Higher values mean better protection. */
219#define KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_1_NOT_AVAIL 0
220#define KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_1_AVAIL 1
221#define KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_1_NOT_REQUIRED 2
222#define KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2 KVM_REG_ARM_FW_REG(2)
223 /* Higher values mean better protection. */
224#define KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_NOT_AVAIL 0
225#define KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_UNKNOWN 1
226#define KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_AVAIL 2
227#define KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_NOT_REQUIRED 3
228#define KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_ENABLED (1U << 4)
217 229
218/* Device Control API: ARM VGIC */ 230/* Device Control API: ARM VGIC */
219#define KVM_DEV_ARM_VGIC_GRP_ADDR 0 231#define KVM_DEV_ARM_VGIC_GRP_ADDR 0
diff --git a/tools/arch/arm64/include/uapi/asm/kvm.h b/tools/arch/arm64/include/uapi/asm/kvm.h
index d819a3e8b552..9a507716ae2f 100644
--- a/tools/arch/arm64/include/uapi/asm/kvm.h
+++ b/tools/arch/arm64/include/uapi/asm/kvm.h
@@ -229,6 +229,16 @@ struct kvm_vcpu_events {
229#define KVM_REG_ARM_FW_REG(r) (KVM_REG_ARM64 | KVM_REG_SIZE_U64 | \ 229#define KVM_REG_ARM_FW_REG(r) (KVM_REG_ARM64 | KVM_REG_SIZE_U64 | \
230 KVM_REG_ARM_FW | ((r) & 0xffff)) 230 KVM_REG_ARM_FW | ((r) & 0xffff))
231#define KVM_REG_ARM_PSCI_VERSION KVM_REG_ARM_FW_REG(0) 231#define KVM_REG_ARM_PSCI_VERSION KVM_REG_ARM_FW_REG(0)
232#define KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_1 KVM_REG_ARM_FW_REG(1)
233#define KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_1_NOT_AVAIL 0
234#define KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_1_AVAIL 1
235#define KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_1_NOT_REQUIRED 2
236#define KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2 KVM_REG_ARM_FW_REG(2)
237#define KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_NOT_AVAIL 0
238#define KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_UNKNOWN 1
239#define KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_AVAIL 2
240#define KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_NOT_REQUIRED 3
241#define KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_ENABLED (1U << 4)
232 242
233/* SVE registers */ 243/* SVE registers */
234#define KVM_REG_ARM64_SVE (0x15 << KVM_REG_ARM_COPROC_SHIFT) 244#define KVM_REG_ARM64_SVE (0x15 << KVM_REG_ARM_COPROC_SHIFT)
diff --git a/tools/arch/powerpc/include/uapi/asm/mman.h b/tools/arch/powerpc/include/uapi/asm/mman.h
index f33105bc5ca6..8601d824a9c6 100644
--- a/tools/arch/powerpc/include/uapi/asm/mman.h
+++ b/tools/arch/powerpc/include/uapi/asm/mman.h
@@ -4,12 +4,8 @@
4#define MAP_DENYWRITE 0x0800 4#define MAP_DENYWRITE 0x0800
5#define MAP_EXECUTABLE 0x1000 5#define MAP_EXECUTABLE 0x1000
6#define MAP_GROWSDOWN 0x0100 6#define MAP_GROWSDOWN 0x0100
7#define MAP_HUGETLB 0x40000
8#define MAP_LOCKED 0x80 7#define MAP_LOCKED 0x80
9#define MAP_NONBLOCK 0x10000
10#define MAP_NORESERVE 0x40 8#define MAP_NORESERVE 0x40
11#define MAP_POPULATE 0x8000
12#define MAP_STACK 0x20000
13#include <uapi/asm-generic/mman-common.h> 9#include <uapi/asm-generic/mman-common.h>
14/* MAP_32BIT is undefined on powerpc, fix it for perf */ 10/* MAP_32BIT is undefined on powerpc, fix it for perf */
15#define MAP_32BIT 0 11#define MAP_32BIT 0
diff --git a/tools/arch/sparc/include/uapi/asm/mman.h b/tools/arch/sparc/include/uapi/asm/mman.h
index 38920eed8cbf..7b94dccc843d 100644
--- a/tools/arch/sparc/include/uapi/asm/mman.h
+++ b/tools/arch/sparc/include/uapi/asm/mman.h
@@ -4,12 +4,8 @@
4#define MAP_DENYWRITE 0x0800 4#define MAP_DENYWRITE 0x0800
5#define MAP_EXECUTABLE 0x1000 5#define MAP_EXECUTABLE 0x1000
6#define MAP_GROWSDOWN 0x0200 6#define MAP_GROWSDOWN 0x0200
7#define MAP_HUGETLB 0x40000
8#define MAP_LOCKED 0x100 7#define MAP_LOCKED 0x100
9#define MAP_NONBLOCK 0x10000
10#define MAP_NORESERVE 0x40 8#define MAP_NORESERVE 0x40
11#define MAP_POPULATE 0x8000
12#define MAP_STACK 0x20000
13#include <uapi/asm-generic/mman-common.h> 9#include <uapi/asm-generic/mman-common.h>
14/* MAP_32BIT is undefined on sparc, fix it for perf */ 10/* MAP_32BIT is undefined on sparc, fix it for perf */
15#define MAP_32BIT 0 11#define MAP_32BIT 0
diff --git a/tools/arch/x86/include/uapi/asm/kvm.h b/tools/arch/x86/include/uapi/asm/kvm.h
index d6ab5b4d15e5..503d3f42da16 100644
--- a/tools/arch/x86/include/uapi/asm/kvm.h
+++ b/tools/arch/x86/include/uapi/asm/kvm.h
@@ -378,10 +378,11 @@ struct kvm_sync_regs {
378 struct kvm_vcpu_events events; 378 struct kvm_vcpu_events events;
379}; 379};
380 380
381#define KVM_X86_QUIRK_LINT0_REENABLED (1 << 0) 381#define KVM_X86_QUIRK_LINT0_REENABLED (1 << 0)
382#define KVM_X86_QUIRK_CD_NW_CLEARED (1 << 1) 382#define KVM_X86_QUIRK_CD_NW_CLEARED (1 << 1)
383#define KVM_X86_QUIRK_LAPIC_MMIO_HOLE (1 << 2) 383#define KVM_X86_QUIRK_LAPIC_MMIO_HOLE (1 << 2)
384#define KVM_X86_QUIRK_OUT_7E_INC_RIP (1 << 3) 384#define KVM_X86_QUIRK_OUT_7E_INC_RIP (1 << 3)
385#define KVM_X86_QUIRK_MISC_ENABLE_NO_MWAIT (1 << 4)
385 386
386#define KVM_STATE_NESTED_FORMAT_VMX 0 387#define KVM_STATE_NESTED_FORMAT_VMX 0
387#define KVM_STATE_NESTED_FORMAT_SVM 1 /* unused */ 388#define KVM_STATE_NESTED_FORMAT_SVM 1 /* unused */
@@ -432,4 +433,17 @@ struct kvm_nested_state {
432 } data; 433 } data;
433}; 434};
434 435
436/* for KVM_CAP_PMU_EVENT_FILTER */
437struct kvm_pmu_event_filter {
438 __u32 action;
439 __u32 nevents;
440 __u32 fixed_counter_bitmap;
441 __u32 flags;
442 __u32 pad[4];
443 __u64 events[0];
444};
445
446#define KVM_PMU_EVENT_ALLOW 0
447#define KVM_PMU_EVENT_DENY 1
448
435#endif /* _ASM_X86_KVM_H */ 449#endif /* _ASM_X86_KVM_H */
diff --git a/tools/arch/x86/include/uapi/asm/vmx.h b/tools/arch/x86/include/uapi/asm/vmx.h
index d213ec5c3766..f0b0c90dd398 100644
--- a/tools/arch/x86/include/uapi/asm/vmx.h
+++ b/tools/arch/x86/include/uapi/asm/vmx.h
@@ -146,7 +146,6 @@
146 146
147#define VMX_ABORT_SAVE_GUEST_MSR_FAIL 1 147#define VMX_ABORT_SAVE_GUEST_MSR_FAIL 1
148#define VMX_ABORT_LOAD_HOST_PDPTE_FAIL 2 148#define VMX_ABORT_LOAD_HOST_PDPTE_FAIL 2
149#define VMX_ABORT_VMCS_CORRUPTED 3
150#define VMX_ABORT_LOAD_HOST_MSR_FAIL 4 149#define VMX_ABORT_LOAD_HOST_MSR_FAIL 4
151 150
152#endif /* _UAPIVMX_H */ 151#endif /* _UAPIVMX_H */
diff --git a/tools/bpf/bpftool/common.c b/tools/bpf/bpftool/common.c
index 5215e0870bcb..6a71324be628 100644
--- a/tools/bpf/bpftool/common.c
+++ b/tools/bpf/bpftool/common.c
@@ -204,7 +204,11 @@ int do_pin_fd(int fd, const char *name)
204 if (err) 204 if (err)
205 return err; 205 return err;
206 206
207 return bpf_obj_pin(fd, name); 207 err = bpf_obj_pin(fd, name);
208 if (err)
209 p_err("can't pin the object (%s): %s", name, strerror(errno));
210
211 return err;
208} 212}
209 213
210int do_pin_any(int argc, char **argv, int (*get_fd_by_id)(__u32)) 214int do_pin_any(int argc, char **argv, int (*get_fd_by_id)(__u32))
@@ -237,7 +241,7 @@ int do_pin_any(int argc, char **argv, int (*get_fd_by_id)(__u32))
237 241
238 fd = get_fd_by_id(id); 242 fd = get_fd_by_id(id);
239 if (fd < 0) { 243 if (fd < 0) {
240 p_err("can't get prog by id (%u): %s", id, strerror(errno)); 244 p_err("can't open object by id (%u): %s", id, strerror(errno));
241 return -1; 245 return -1;
242 } 246 }
243 247
diff --git a/tools/bpf/bpftool/prog.c b/tools/bpf/bpftool/prog.c
index 66f04a4846a5..43fdbbfe41bb 100644
--- a/tools/bpf/bpftool/prog.c
+++ b/tools/bpf/bpftool/prog.c
@@ -363,7 +363,9 @@ static int do_show(int argc, char **argv)
363 if (fd < 0) 363 if (fd < 0)
364 return -1; 364 return -1;
365 365
366 return show_prog(fd); 366 err = show_prog(fd);
367 close(fd);
368 return err;
367 } 369 }
368 370
369 if (argc) 371 if (argc)
diff --git a/tools/hv/hv_get_dhcp_info.sh b/tools/hv/hv_get_dhcp_info.sh
index c38686c44656..2f2a3c7df3de 100755
--- a/tools/hv/hv_get_dhcp_info.sh
+++ b/tools/hv/hv_get_dhcp_info.sh
@@ -13,7 +13,7 @@
13# the script prints the string "Disabled" to stdout. 13# the script prints the string "Disabled" to stdout.
14# 14#
15# Each Distro is expected to implement this script in a distro specific 15# Each Distro is expected to implement this script in a distro specific
16# fashion. For instance on Distros that ship with Network Manager enabled, 16# fashion. For instance, on Distros that ship with Network Manager enabled,
17# this script can be based on the Network Manager APIs for retrieving DHCP 17# this script can be based on the Network Manager APIs for retrieving DHCP
18# information. 18# information.
19 19
diff --git a/tools/hv/hv_kvp_daemon.c b/tools/hv/hv_kvp_daemon.c
index d7e06fe0270e..e9ef4ca6a655 100644
--- a/tools/hv/hv_kvp_daemon.c
+++ b/tools/hv/hv_kvp_daemon.c
@@ -700,7 +700,7 @@ static void kvp_get_ipconfig_info(char *if_name,
700 700
701 701
702 /* 702 /*
703 * Gather the DNS state. 703 * Gather the DNS state.
704 * Since there is no standard way to get this information 704 * Since there is no standard way to get this information
705 * across various distributions of interest; we just invoke 705 * across various distributions of interest; we just invoke
706 * an external script that needs to be ported across distros 706 * an external script that needs to be ported across distros
@@ -809,7 +809,7 @@ kvp_get_ip_info(int family, char *if_name, int op,
809 int sn_offset = 0; 809 int sn_offset = 0;
810 int error = 0; 810 int error = 0;
811 char *buffer; 811 char *buffer;
812 struct hv_kvp_ipaddr_value *ip_buffer; 812 struct hv_kvp_ipaddr_value *ip_buffer = NULL;
813 char cidr_mask[5]; /* /xyz */ 813 char cidr_mask[5]; /* /xyz */
814 int weight; 814 int weight;
815 int i; 815 int i;
@@ -1051,7 +1051,7 @@ static int parse_ip_val_buffer(char *in_buf, int *offset,
1051 char *start; 1051 char *start;
1052 1052
1053 /* 1053 /*
1054 * in_buf has sequence of characters that are seperated by 1054 * in_buf has sequence of characters that are separated by
1055 * the character ';'. The last sequence does not have the 1055 * the character ';'. The last sequence does not have the
1056 * terminating ";" character. 1056 * terminating ";" character.
1057 */ 1057 */
@@ -1386,6 +1386,8 @@ int main(int argc, char *argv[])
1386 daemonize = 0; 1386 daemonize = 0;
1387 break; 1387 break;
1388 case 'h': 1388 case 'h':
1389 print_usage(argv);
1390 exit(0);
1389 default: 1391 default:
1390 print_usage(argv); 1392 print_usage(argv);
1391 exit(EXIT_FAILURE); 1393 exit(EXIT_FAILURE);
@@ -1490,7 +1492,7 @@ int main(int argc, char *argv[])
1490 case KVP_OP_GET_IP_INFO: 1492 case KVP_OP_GET_IP_INFO:
1491 kvp_ip_val = &hv_msg->body.kvp_ip_val; 1493 kvp_ip_val = &hv_msg->body.kvp_ip_val;
1492 1494
1493 error = kvp_mac_to_ip(kvp_ip_val); 1495 error = kvp_mac_to_ip(kvp_ip_val);
1494 1496
1495 if (error) 1497 if (error)
1496 hv_msg->error = error; 1498 hv_msg->error = error;
diff --git a/tools/hv/hv_set_ifconfig.sh b/tools/hv/hv_set_ifconfig.sh
index 7ed9f85ef908..d10fe35b7f25 100755
--- a/tools/hv/hv_set_ifconfig.sh
+++ b/tools/hv/hv_set_ifconfig.sh
@@ -12,7 +12,7 @@
12# be used to configure the interface. 12# be used to configure the interface.
13# 13#
14# Each Distro is expected to implement this script in a distro specific 14# Each Distro is expected to implement this script in a distro specific
15# fashion. For instance on Distros that ship with Network Manager enabled, 15# fashion. For instance, on Distros that ship with Network Manager enabled,
16# this script can be based on the Network Manager APIs for configuring the 16# this script can be based on the Network Manager APIs for configuring the
17# interface. 17# interface.
18# 18#
diff --git a/tools/hv/hv_vss_daemon.c b/tools/hv/hv_vss_daemon.c
index efe1e34dd91b..92902a88f671 100644
--- a/tools/hv/hv_vss_daemon.c
+++ b/tools/hv/hv_vss_daemon.c
@@ -42,7 +42,7 @@ static int vss_do_freeze(char *dir, unsigned int cmd)
42 * If a partition is mounted more than once, only the first 42 * If a partition is mounted more than once, only the first
43 * FREEZE/THAW can succeed and the later ones will get 43 * FREEZE/THAW can succeed and the later ones will get
44 * EBUSY/EINVAL respectively: there could be 2 cases: 44 * EBUSY/EINVAL respectively: there could be 2 cases:
45 * 1) a user may mount the same partition to differnt directories 45 * 1) a user may mount the same partition to different directories
46 * by mistake or on purpose; 46 * by mistake or on purpose;
47 * 2) The subvolume of btrfs appears to have the same partition 47 * 2) The subvolume of btrfs appears to have the same partition
48 * mounted more than once. 48 * mounted more than once.
@@ -218,6 +218,8 @@ int main(int argc, char *argv[])
218 daemonize = 0; 218 daemonize = 0;
219 break; 219 break;
220 case 'h': 220 case 'h':
221 print_usage(argv);
222 exit(0);
221 default: 223 default:
222 print_usage(argv); 224 print_usage(argv);
223 exit(EXIT_FAILURE); 225 exit(EXIT_FAILURE);
diff --git a/tools/hv/lsvmbus b/tools/hv/lsvmbus
index 55e7374bade0..099f2c44dbed 100644
--- a/tools/hv/lsvmbus
+++ b/tools/hv/lsvmbus
@@ -4,10 +4,10 @@
4import os 4import os
5from optparse import OptionParser 5from optparse import OptionParser
6 6
7help_msg = "print verbose messages. Try -vv, -vvv for more verbose messages"
7parser = OptionParser() 8parser = OptionParser()
8parser.add_option("-v", "--verbose", dest="verbose", 9parser.add_option(
9 help="print verbose messages. Try -vv, -vvv for \ 10 "-v", "--verbose", dest="verbose", help=help_msg, action="count")
10 more verbose messages", action="count")
11 11
12(options, args) = parser.parse_args() 12(options, args) = parser.parse_args()
13 13
@@ -21,27 +21,28 @@ if not os.path.isdir(vmbus_sys_path):
21 exit(-1) 21 exit(-1)
22 22
23vmbus_dev_dict = { 23vmbus_dev_dict = {
24 '{0e0b6031-5213-4934-818b-38d90ced39db}' : '[Operating system shutdown]', 24 '{0e0b6031-5213-4934-818b-38d90ced39db}': '[Operating system shutdown]',
25 '{9527e630-d0ae-497b-adce-e80ab0175caf}' : '[Time Synchronization]', 25 '{9527e630-d0ae-497b-adce-e80ab0175caf}': '[Time Synchronization]',
26 '{57164f39-9115-4e78-ab55-382f3bd5422d}' : '[Heartbeat]', 26 '{57164f39-9115-4e78-ab55-382f3bd5422d}': '[Heartbeat]',
27 '{a9a0f4e7-5a45-4d96-b827-8a841e8c03e6}' : '[Data Exchange]', 27 '{a9a0f4e7-5a45-4d96-b827-8a841e8c03e6}': '[Data Exchange]',
28 '{35fa2e29-ea23-4236-96ae-3a6ebacba440}' : '[Backup (volume checkpoint)]', 28 '{35fa2e29-ea23-4236-96ae-3a6ebacba440}': '[Backup (volume checkpoint)]',
29 '{34d14be3-dee4-41c8-9ae7-6b174977c192}' : '[Guest services]', 29 '{34d14be3-dee4-41c8-9ae7-6b174977c192}': '[Guest services]',
30 '{525074dc-8985-46e2-8057-a307dc18a502}' : '[Dynamic Memory]', 30 '{525074dc-8985-46e2-8057-a307dc18a502}': '[Dynamic Memory]',
31 '{cfa8b69e-5b4a-4cc0-b98b-8ba1a1f3f95a}' : 'Synthetic mouse', 31 '{cfa8b69e-5b4a-4cc0-b98b-8ba1a1f3f95a}': 'Synthetic mouse',
32 '{f912ad6d-2b17-48ea-bd65-f927a61c7684}' : 'Synthetic keyboard', 32 '{f912ad6d-2b17-48ea-bd65-f927a61c7684}': 'Synthetic keyboard',
33 '{da0a7802-e377-4aac-8e77-0558eb1073f8}' : 'Synthetic framebuffer adapter', 33 '{da0a7802-e377-4aac-8e77-0558eb1073f8}': 'Synthetic framebuffer adapter',
34 '{f8615163-df3e-46c5-913f-f2d2f965ed0e}' : 'Synthetic network adapter', 34 '{f8615163-df3e-46c5-913f-f2d2f965ed0e}': 'Synthetic network adapter',
35 '{32412632-86cb-44a2-9b5c-50d1417354f5}' : 'Synthetic IDE Controller', 35 '{32412632-86cb-44a2-9b5c-50d1417354f5}': 'Synthetic IDE Controller',
36 '{ba6163d9-04a1-4d29-b605-72e2ffb1dc7f}' : 'Synthetic SCSI Controller', 36 '{ba6163d9-04a1-4d29-b605-72e2ffb1dc7f}': 'Synthetic SCSI Controller',
37 '{2f9bcc4a-0069-4af3-b76b-6fd0be528cda}' : 'Synthetic fiber channel adapter', 37 '{2f9bcc4a-0069-4af3-b76b-6fd0be528cda}': 'Synthetic fiber channel adapter',
38 '{8c2eaf3d-32a7-4b09-ab99-bd1f1c86b501}' : 'Synthetic RDMA adapter', 38 '{8c2eaf3d-32a7-4b09-ab99-bd1f1c86b501}': 'Synthetic RDMA adapter',
39 '{44c4f61d-4444-4400-9d52-802e27ede19f}' : 'PCI Express pass-through', 39 '{44c4f61d-4444-4400-9d52-802e27ede19f}': 'PCI Express pass-through',
40 '{276aacf4-ac15-426c-98dd-7521ad3f01fe}' : '[Reserved system device]', 40 '{276aacf4-ac15-426c-98dd-7521ad3f01fe}': '[Reserved system device]',
41 '{f8e65716-3cb3-4a06-9a60-1889c5cccab5}' : '[Reserved system device]', 41 '{f8e65716-3cb3-4a06-9a60-1889c5cccab5}': '[Reserved system device]',
42 '{3375baf4-9e15-4b30-b765-67acb10d607b}' : '[Reserved system device]', 42 '{3375baf4-9e15-4b30-b765-67acb10d607b}': '[Reserved system device]',
43} 43}
44 44
45
45def get_vmbus_dev_attr(dev_name, attr): 46def get_vmbus_dev_attr(dev_name, attr):
46 try: 47 try:
47 f = open('%s/%s/%s' % (vmbus_sys_path, dev_name, attr), 'r') 48 f = open('%s/%s/%s' % (vmbus_sys_path, dev_name, attr), 'r')
@@ -52,6 +53,7 @@ def get_vmbus_dev_attr(dev_name, attr):
52 53
53 return lines 54 return lines
54 55
56
55class VMBus_Dev: 57class VMBus_Dev:
56 pass 58 pass
57 59
@@ -66,12 +68,13 @@ for f in os.listdir(vmbus_sys_path):
66 68
67 chn_vp_mapping = get_vmbus_dev_attr(f, 'channel_vp_mapping') 69 chn_vp_mapping = get_vmbus_dev_attr(f, 'channel_vp_mapping')
68 chn_vp_mapping = [c.strip() for c in chn_vp_mapping] 70 chn_vp_mapping = [c.strip() for c in chn_vp_mapping]
69 chn_vp_mapping = sorted(chn_vp_mapping, 71 chn_vp_mapping = sorted(
70 key = lambda c : int(c.split(':')[0])) 72 chn_vp_mapping, key=lambda c: int(c.split(':')[0]))
71 73
72 chn_vp_mapping = ['\tRel_ID=%s, target_cpu=%s' % 74 chn_vp_mapping = [
73 (c.split(':')[0], c.split(':')[1]) 75 '\tRel_ID=%s, target_cpu=%s' %
74 for c in chn_vp_mapping] 76 (c.split(':')[0], c.split(':')[1]) for c in chn_vp_mapping
77 ]
75 d = VMBus_Dev() 78 d = VMBus_Dev()
76 d.sysfs_path = '%s/%s' % (vmbus_sys_path, f) 79 d.sysfs_path = '%s/%s' % (vmbus_sys_path, f)
77 d.vmbus_id = vmbus_id 80 d.vmbus_id = vmbus_id
@@ -85,7 +88,7 @@ for f in os.listdir(vmbus_sys_path):
85 vmbus_dev_list.append(d) 88 vmbus_dev_list.append(d)
86 89
87 90
88vmbus_dev_list = sorted(vmbus_dev_list, key = lambda d : int(d.vmbus_id)) 91vmbus_dev_list = sorted(vmbus_dev_list, key=lambda d: int(d.vmbus_id))
89 92
90format0 = '%2s: %s' 93format0 = '%2s: %s'
91format1 = '%2s: Class_ID = %s - %s\n%s' 94format1 = '%2s: Class_ID = %s - %s\n%s'
@@ -95,9 +98,15 @@ for d in vmbus_dev_list:
95 if verbose == 0: 98 if verbose == 0:
96 print(('VMBUS ID ' + format0) % (d.vmbus_id, d.dev_desc)) 99 print(('VMBUS ID ' + format0) % (d.vmbus_id, d.dev_desc))
97 elif verbose == 1: 100 elif verbose == 1:
98 print (('VMBUS ID ' + format1) % \ 101 print(
99 (d.vmbus_id, d.class_id, d.dev_desc, d.chn_vp_mapping)) 102 ('VMBUS ID ' + format1) %
103 (d.vmbus_id, d.class_id, d.dev_desc, d.chn_vp_mapping)
104 )
100 else: 105 else:
101 print (('VMBUS ID ' + format2) % \ 106 print(
102 (d.vmbus_id, d.class_id, d.dev_desc, \ 107 ('VMBUS ID ' + format2) %
103 d.device_id, d.sysfs_path, d.chn_vp_mapping)) 108 (
109 d.vmbus_id, d.class_id, d.dev_desc,
110 d.device_id, d.sysfs_path, d.chn_vp_mapping
111 )
112 )
diff --git a/tools/include/uapi/asm-generic/mman-common.h b/tools/include/uapi/asm-generic/mman-common.h
index abd238d0f7a4..63b1f506ea67 100644
--- a/tools/include/uapi/asm-generic/mman-common.h
+++ b/tools/include/uapi/asm-generic/mman-common.h
@@ -19,15 +19,18 @@
19#define MAP_TYPE 0x0f /* Mask for type of mapping */ 19#define MAP_TYPE 0x0f /* Mask for type of mapping */
20#define MAP_FIXED 0x10 /* Interpret addr exactly */ 20#define MAP_FIXED 0x10 /* Interpret addr exactly */
21#define MAP_ANONYMOUS 0x20 /* don't use a file */ 21#define MAP_ANONYMOUS 0x20 /* don't use a file */
22#ifdef CONFIG_MMAP_ALLOW_UNINITIALIZED
23# define MAP_UNINITIALIZED 0x4000000 /* For anonymous mmap, memory could be uninitialized */
24#else
25# define MAP_UNINITIALIZED 0x0 /* Don't support this flag */
26#endif
27 22
28/* 0x0100 - 0x80000 flags are defined in asm-generic/mman.h */ 23/* 0x0100 - 0x4000 flags are defined in asm-generic/mman.h */
24#define MAP_POPULATE 0x008000 /* populate (prefault) pagetables */
25#define MAP_NONBLOCK 0x010000 /* do not block on IO */
26#define MAP_STACK 0x020000 /* give out an address that is best suited for process/thread stacks */
27#define MAP_HUGETLB 0x040000 /* create a huge page mapping */
28#define MAP_SYNC 0x080000 /* perform synchronous page faults for the mapping */
29#define MAP_FIXED_NOREPLACE 0x100000 /* MAP_FIXED which doesn't unmap underlying mapping */ 29#define MAP_FIXED_NOREPLACE 0x100000 /* MAP_FIXED which doesn't unmap underlying mapping */
30 30
31#define MAP_UNINITIALIZED 0x4000000 /* For anonymous mmap, memory could be
32 * uninitialized */
33
31/* 34/*
32 * Flags for mlock 35 * Flags for mlock
33 */ 36 */
diff --git a/tools/include/uapi/asm-generic/mman.h b/tools/include/uapi/asm-generic/mman.h
index 36c197fc44a0..406f7718f9ad 100644
--- a/tools/include/uapi/asm-generic/mman.h
+++ b/tools/include/uapi/asm-generic/mman.h
@@ -9,13 +9,11 @@
9#define MAP_EXECUTABLE 0x1000 /* mark it as an executable */ 9#define MAP_EXECUTABLE 0x1000 /* mark it as an executable */
10#define MAP_LOCKED 0x2000 /* pages are locked */ 10#define MAP_LOCKED 0x2000 /* pages are locked */
11#define MAP_NORESERVE 0x4000 /* don't check for reservations */ 11#define MAP_NORESERVE 0x4000 /* don't check for reservations */
12#define MAP_POPULATE 0x8000 /* populate (prefault) pagetables */
13#define MAP_NONBLOCK 0x10000 /* do not block on IO */
14#define MAP_STACK 0x20000 /* give out an address that is best suited for process/thread stacks */
15#define MAP_HUGETLB 0x40000 /* create a huge page mapping */
16#define MAP_SYNC 0x80000 /* perform synchronous page faults for the mapping */
17 12
18/* Bits [26:31] are reserved, see mman-common.h for MAP_HUGETLB usage */ 13/*
14 * Bits [26:31] are reserved, see asm-generic/hugetlb_encode.h
15 * for MAP_HUGETLB usage
16 */
19 17
20#define MCL_CURRENT 1 /* lock all current mappings */ 18#define MCL_CURRENT 1 /* lock all current mappings */
21#define MCL_FUTURE 2 /* lock all future mappings */ 19#define MCL_FUTURE 2 /* lock all future mappings */
diff --git a/tools/include/uapi/asm-generic/unistd.h b/tools/include/uapi/asm-generic/unistd.h
index a87904daf103..1be0e798e362 100644
--- a/tools/include/uapi/asm-generic/unistd.h
+++ b/tools/include/uapi/asm-generic/unistd.h
@@ -844,9 +844,15 @@ __SYSCALL(__NR_fsconfig, sys_fsconfig)
844__SYSCALL(__NR_fsmount, sys_fsmount) 844__SYSCALL(__NR_fsmount, sys_fsmount)
845#define __NR_fspick 433 845#define __NR_fspick 433
846__SYSCALL(__NR_fspick, sys_fspick) 846__SYSCALL(__NR_fspick, sys_fspick)
847#define __NR_pidfd_open 434
848__SYSCALL(__NR_pidfd_open, sys_pidfd_open)
849#ifdef __ARCH_WANT_SYS_CLONE3
850#define __NR_clone3 435
851__SYSCALL(__NR_clone3, sys_clone3)
852#endif
847 853
848#undef __NR_syscalls 854#undef __NR_syscalls
849#define __NR_syscalls 434 855#define __NR_syscalls 436
850 856
851/* 857/*
852 * 32 bit systems traditionally used different 858 * 32 bit systems traditionally used different
diff --git a/tools/include/uapi/drm/drm.h b/tools/include/uapi/drm/drm.h
index 661d73f9a919..8a5b2f8f8eb9 100644
--- a/tools/include/uapi/drm/drm.h
+++ b/tools/include/uapi/drm/drm.h
@@ -50,6 +50,7 @@ typedef unsigned int drm_handle_t;
50 50
51#else /* One of the BSDs */ 51#else /* One of the BSDs */
52 52
53#include <stdint.h>
53#include <sys/ioccom.h> 54#include <sys/ioccom.h>
54#include <sys/types.h> 55#include <sys/types.h>
55typedef int8_t __s8; 56typedef int8_t __s8;
diff --git a/tools/include/uapi/drm/i915_drm.h b/tools/include/uapi/drm/i915_drm.h
index 3a73f5316766..328d05e77d9f 100644
--- a/tools/include/uapi/drm/i915_drm.h
+++ b/tools/include/uapi/drm/i915_drm.h
@@ -136,6 +136,8 @@ enum drm_i915_gem_engine_class {
136struct i915_engine_class_instance { 136struct i915_engine_class_instance {
137 __u16 engine_class; /* see enum drm_i915_gem_engine_class */ 137 __u16 engine_class; /* see enum drm_i915_gem_engine_class */
138 __u16 engine_instance; 138 __u16 engine_instance;
139#define I915_ENGINE_CLASS_INVALID_NONE -1
140#define I915_ENGINE_CLASS_INVALID_VIRTUAL -2
139}; 141};
140 142
141/** 143/**
@@ -355,6 +357,8 @@ typedef struct _drm_i915_sarea {
355#define DRM_I915_PERF_ADD_CONFIG 0x37 357#define DRM_I915_PERF_ADD_CONFIG 0x37
356#define DRM_I915_PERF_REMOVE_CONFIG 0x38 358#define DRM_I915_PERF_REMOVE_CONFIG 0x38
357#define DRM_I915_QUERY 0x39 359#define DRM_I915_QUERY 0x39
360#define DRM_I915_GEM_VM_CREATE 0x3a
361#define DRM_I915_GEM_VM_DESTROY 0x3b
358/* Must be kept compact -- no holes */ 362/* Must be kept compact -- no holes */
359 363
360#define DRM_IOCTL_I915_INIT DRM_IOW( DRM_COMMAND_BASE + DRM_I915_INIT, drm_i915_init_t) 364#define DRM_IOCTL_I915_INIT DRM_IOW( DRM_COMMAND_BASE + DRM_I915_INIT, drm_i915_init_t)
@@ -415,6 +419,8 @@ typedef struct _drm_i915_sarea {
415#define DRM_IOCTL_I915_PERF_ADD_CONFIG DRM_IOW(DRM_COMMAND_BASE + DRM_I915_PERF_ADD_CONFIG, struct drm_i915_perf_oa_config) 419#define DRM_IOCTL_I915_PERF_ADD_CONFIG DRM_IOW(DRM_COMMAND_BASE + DRM_I915_PERF_ADD_CONFIG, struct drm_i915_perf_oa_config)
416#define DRM_IOCTL_I915_PERF_REMOVE_CONFIG DRM_IOW(DRM_COMMAND_BASE + DRM_I915_PERF_REMOVE_CONFIG, __u64) 420#define DRM_IOCTL_I915_PERF_REMOVE_CONFIG DRM_IOW(DRM_COMMAND_BASE + DRM_I915_PERF_REMOVE_CONFIG, __u64)
417#define DRM_IOCTL_I915_QUERY DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_QUERY, struct drm_i915_query) 421#define DRM_IOCTL_I915_QUERY DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_QUERY, struct drm_i915_query)
422#define DRM_IOCTL_I915_GEM_VM_CREATE DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_VM_CREATE, struct drm_i915_gem_vm_control)
423#define DRM_IOCTL_I915_GEM_VM_DESTROY DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_VM_DESTROY, struct drm_i915_gem_vm_control)
418 424
419/* Allow drivers to submit batchbuffers directly to hardware, relying 425/* Allow drivers to submit batchbuffers directly to hardware, relying
420 * on the security mechanisms provided by hardware. 426 * on the security mechanisms provided by hardware.
@@ -598,6 +604,12 @@ typedef struct drm_i915_irq_wait {
598 */ 604 */
599#define I915_PARAM_MMAP_GTT_COHERENT 52 605#define I915_PARAM_MMAP_GTT_COHERENT 52
600 606
607/*
608 * Query whether DRM_I915_GEM_EXECBUFFER2 supports coordination of parallel
609 * execution through use of explicit fence support.
610 * See I915_EXEC_FENCE_OUT and I915_EXEC_FENCE_SUBMIT.
611 */
612#define I915_PARAM_HAS_EXEC_SUBMIT_FENCE 53
601/* Must be kept compact -- no holes and well documented */ 613/* Must be kept compact -- no holes and well documented */
602 614
603typedef struct drm_i915_getparam { 615typedef struct drm_i915_getparam {
@@ -1120,7 +1132,16 @@ struct drm_i915_gem_execbuffer2 {
1120 */ 1132 */
1121#define I915_EXEC_FENCE_ARRAY (1<<19) 1133#define I915_EXEC_FENCE_ARRAY (1<<19)
1122 1134
1123#define __I915_EXEC_UNKNOWN_FLAGS (-(I915_EXEC_FENCE_ARRAY<<1)) 1135/*
1136 * Setting I915_EXEC_FENCE_SUBMIT implies that lower_32_bits(rsvd2) represent
1137 * a sync_file fd to wait upon (in a nonblocking manner) prior to executing
1138 * the batch.
1139 *
1140 * Returns -EINVAL if the sync_file fd cannot be found.
1141 */
1142#define I915_EXEC_FENCE_SUBMIT (1 << 20)
1143
1144#define __I915_EXEC_UNKNOWN_FLAGS (-(I915_EXEC_FENCE_SUBMIT << 1))
1124 1145
1125#define I915_EXEC_CONTEXT_ID_MASK (0xffffffff) 1146#define I915_EXEC_CONTEXT_ID_MASK (0xffffffff)
1126#define i915_execbuffer2_set_context_id(eb2, context) \ 1147#define i915_execbuffer2_set_context_id(eb2, context) \
@@ -1464,8 +1485,9 @@ struct drm_i915_gem_context_create_ext {
1464 __u32 ctx_id; /* output: id of new context*/ 1485 __u32 ctx_id; /* output: id of new context*/
1465 __u32 flags; 1486 __u32 flags;
1466#define I915_CONTEXT_CREATE_FLAGS_USE_EXTENSIONS (1u << 0) 1487#define I915_CONTEXT_CREATE_FLAGS_USE_EXTENSIONS (1u << 0)
1488#define I915_CONTEXT_CREATE_FLAGS_SINGLE_TIMELINE (1u << 1)
1467#define I915_CONTEXT_CREATE_FLAGS_UNKNOWN \ 1489#define I915_CONTEXT_CREATE_FLAGS_UNKNOWN \
1468 (-(I915_CONTEXT_CREATE_FLAGS_USE_EXTENSIONS << 1)) 1490 (-(I915_CONTEXT_CREATE_FLAGS_SINGLE_TIMELINE << 1))
1469 __u64 extensions; 1491 __u64 extensions;
1470}; 1492};
1471 1493
@@ -1507,6 +1529,41 @@ struct drm_i915_gem_context_param {
1507 * On creation, all new contexts are marked as recoverable. 1529 * On creation, all new contexts are marked as recoverable.
1508 */ 1530 */
1509#define I915_CONTEXT_PARAM_RECOVERABLE 0x8 1531#define I915_CONTEXT_PARAM_RECOVERABLE 0x8
1532
1533 /*
1534 * The id of the associated virtual memory address space (ppGTT) of
1535 * this context. Can be retrieved and passed to another context
1536 * (on the same fd) for both to use the same ppGTT and so share
1537 * address layouts, and avoid reloading the page tables on context
1538 * switches between themselves.
1539 *
1540 * See DRM_I915_GEM_VM_CREATE and DRM_I915_GEM_VM_DESTROY.
1541 */
1542#define I915_CONTEXT_PARAM_VM 0x9
1543
1544/*
1545 * I915_CONTEXT_PARAM_ENGINES:
1546 *
1547 * Bind this context to operate on this subset of available engines. Henceforth,
1548 * the I915_EXEC_RING selector for DRM_IOCTL_I915_GEM_EXECBUFFER2 operates as
1549 * an index into this array of engines; I915_EXEC_DEFAULT selecting engine[0]
1550 * and upwards. Slots 0...N are filled in using the specified (class, instance).
1551 * Use
1552 * engine_class: I915_ENGINE_CLASS_INVALID,
1553 * engine_instance: I915_ENGINE_CLASS_INVALID_NONE
1554 * to specify a gap in the array that can be filled in later, e.g. by a
1555 * virtual engine used for load balancing.
1556 *
1557 * Setting the number of engines bound to the context to 0, by passing a zero
1558 * sized argument, will revert back to default settings.
1559 *
1560 * See struct i915_context_param_engines.
1561 *
1562 * Extensions:
1563 * i915_context_engines_load_balance (I915_CONTEXT_ENGINES_EXT_LOAD_BALANCE)
1564 * i915_context_engines_bond (I915_CONTEXT_ENGINES_EXT_BOND)
1565 */
1566#define I915_CONTEXT_PARAM_ENGINES 0xa
1510/* Must be kept compact -- no holes and well documented */ 1567/* Must be kept compact -- no holes and well documented */
1511 1568
1512 __u64 value; 1569 __u64 value;
@@ -1540,9 +1597,10 @@ struct drm_i915_gem_context_param_sseu {
1540 struct i915_engine_class_instance engine; 1597 struct i915_engine_class_instance engine;
1541 1598
1542 /* 1599 /*
1543 * Unused for now. Must be cleared to zero. 1600 * Unknown flags must be cleared to zero.
1544 */ 1601 */
1545 __u32 flags; 1602 __u32 flags;
1603#define I915_CONTEXT_SSEU_FLAG_ENGINE_INDEX (1u << 0)
1546 1604
1547 /* 1605 /*
1548 * Mask of slices to enable for the context. Valid values are a subset 1606 * Mask of slices to enable for the context. Valid values are a subset
@@ -1570,12 +1628,115 @@ struct drm_i915_gem_context_param_sseu {
1570 __u32 rsvd; 1628 __u32 rsvd;
1571}; 1629};
1572 1630
1631/*
1632 * i915_context_engines_load_balance:
1633 *
1634 * Enable load balancing across this set of engines.
1635 *
1636 * Into the I915_EXEC_DEFAULT slot [0], a virtual engine is created that when
1637 * used will proxy the execbuffer request onto one of the set of engines
1638 * in such a way as to distribute the load evenly across the set.
1639 *
1640 * The set of engines must be compatible (e.g. the same HW class) as they
1641 * will share the same logical GPU context and ring.
1642 *
1643 * To intermix rendering with the virtual engine and direct rendering onto
1644 * the backing engines (bypassing the load balancing proxy), the context must
1645 * be defined to use a single timeline for all engines.
1646 */
1647struct i915_context_engines_load_balance {
1648 struct i915_user_extension base;
1649
1650 __u16 engine_index;
1651 __u16 num_siblings;
1652 __u32 flags; /* all undefined flags must be zero */
1653
1654 __u64 mbz64; /* reserved for future use; must be zero */
1655
1656 struct i915_engine_class_instance engines[0];
1657} __attribute__((packed));
1658
1659#define I915_DEFINE_CONTEXT_ENGINES_LOAD_BALANCE(name__, N__) struct { \
1660 struct i915_user_extension base; \
1661 __u16 engine_index; \
1662 __u16 num_siblings; \
1663 __u32 flags; \
1664 __u64 mbz64; \
1665 struct i915_engine_class_instance engines[N__]; \
1666} __attribute__((packed)) name__
1667
1668/*
1669 * i915_context_engines_bond:
1670 *
1671 * Constructed bonded pairs for execution within a virtual engine.
1672 *
1673 * All engines are equal, but some are more equal than others. Given
1674 * the distribution of resources in the HW, it may be preferable to run
1675 * a request on a given subset of engines in parallel to a request on a
1676 * specific engine. We enable this selection of engines within a virtual
1677 * engine by specifying bonding pairs, for any given master engine we will
1678 * only execute on one of the corresponding siblings within the virtual engine.
1679 *
1680 * To execute a request in parallel on the master engine and a sibling requires
1681 * coordination with a I915_EXEC_FENCE_SUBMIT.
1682 */
1683struct i915_context_engines_bond {
1684 struct i915_user_extension base;
1685
1686 struct i915_engine_class_instance master;
1687
1688 __u16 virtual_index; /* index of virtual engine in ctx->engines[] */
1689 __u16 num_bonds;
1690
1691 __u64 flags; /* all undefined flags must be zero */
1692 __u64 mbz64[4]; /* reserved for future use; must be zero */
1693
1694 struct i915_engine_class_instance engines[0];
1695} __attribute__((packed));
1696
1697#define I915_DEFINE_CONTEXT_ENGINES_BOND(name__, N__) struct { \
1698 struct i915_user_extension base; \
1699 struct i915_engine_class_instance master; \
1700 __u16 virtual_index; \
1701 __u16 num_bonds; \
1702 __u64 flags; \
1703 __u64 mbz64[4]; \
1704 struct i915_engine_class_instance engines[N__]; \
1705} __attribute__((packed)) name__
1706
1707struct i915_context_param_engines {
1708 __u64 extensions; /* linked chain of extension blocks, 0 terminates */
1709#define I915_CONTEXT_ENGINES_EXT_LOAD_BALANCE 0 /* see i915_context_engines_load_balance */
1710#define I915_CONTEXT_ENGINES_EXT_BOND 1 /* see i915_context_engines_bond */
1711 struct i915_engine_class_instance engines[0];
1712} __attribute__((packed));
1713
1714#define I915_DEFINE_CONTEXT_PARAM_ENGINES(name__, N__) struct { \
1715 __u64 extensions; \
1716 struct i915_engine_class_instance engines[N__]; \
1717} __attribute__((packed)) name__
1718
1573struct drm_i915_gem_context_create_ext_setparam { 1719struct drm_i915_gem_context_create_ext_setparam {
1574#define I915_CONTEXT_CREATE_EXT_SETPARAM 0 1720#define I915_CONTEXT_CREATE_EXT_SETPARAM 0
1575 struct i915_user_extension base; 1721 struct i915_user_extension base;
1576 struct drm_i915_gem_context_param param; 1722 struct drm_i915_gem_context_param param;
1577}; 1723};
1578 1724
1725struct drm_i915_gem_context_create_ext_clone {
1726#define I915_CONTEXT_CREATE_EXT_CLONE 1
1727 struct i915_user_extension base;
1728 __u32 clone_id;
1729 __u32 flags;
1730#define I915_CONTEXT_CLONE_ENGINES (1u << 0)
1731#define I915_CONTEXT_CLONE_FLAGS (1u << 1)
1732#define I915_CONTEXT_CLONE_SCHEDATTR (1u << 2)
1733#define I915_CONTEXT_CLONE_SSEU (1u << 3)
1734#define I915_CONTEXT_CLONE_TIMELINE (1u << 4)
1735#define I915_CONTEXT_CLONE_VM (1u << 5)
1736#define I915_CONTEXT_CLONE_UNKNOWN -(I915_CONTEXT_CLONE_VM << 1)
1737 __u64 rsvd;
1738};
1739
1579struct drm_i915_gem_context_destroy { 1740struct drm_i915_gem_context_destroy {
1580 __u32 ctx_id; 1741 __u32 ctx_id;
1581 __u32 pad; 1742 __u32 pad;
@@ -1821,6 +1982,7 @@ struct drm_i915_perf_oa_config {
1821struct drm_i915_query_item { 1982struct drm_i915_query_item {
1822 __u64 query_id; 1983 __u64 query_id;
1823#define DRM_I915_QUERY_TOPOLOGY_INFO 1 1984#define DRM_I915_QUERY_TOPOLOGY_INFO 1
1985#define DRM_I915_QUERY_ENGINE_INFO 2
1824/* Must be kept compact -- no holes and well documented */ 1986/* Must be kept compact -- no holes and well documented */
1825 1987
1826 /* 1988 /*
@@ -1919,6 +2081,47 @@ struct drm_i915_query_topology_info {
1919 __u8 data[]; 2081 __u8 data[];
1920}; 2082};
1921 2083
2084/**
2085 * struct drm_i915_engine_info
2086 *
2087 * Describes one engine and it's capabilities as known to the driver.
2088 */
2089struct drm_i915_engine_info {
2090 /** Engine class and instance. */
2091 struct i915_engine_class_instance engine;
2092
2093 /** Reserved field. */
2094 __u32 rsvd0;
2095
2096 /** Engine flags. */
2097 __u64 flags;
2098
2099 /** Capabilities of this engine. */
2100 __u64 capabilities;
2101#define I915_VIDEO_CLASS_CAPABILITY_HEVC (1 << 0)
2102#define I915_VIDEO_AND_ENHANCE_CLASS_CAPABILITY_SFC (1 << 1)
2103
2104 /** Reserved fields. */
2105 __u64 rsvd1[4];
2106};
2107
2108/**
2109 * struct drm_i915_query_engine_info
2110 *
2111 * Engine info query enumerates all engines known to the driver by filling in
2112 * an array of struct drm_i915_engine_info structures.
2113 */
2114struct drm_i915_query_engine_info {
2115 /** Number of struct drm_i915_engine_info structs following. */
2116 __u32 num_engines;
2117
2118 /** MBZ */
2119 __u32 rsvd[3];
2120
2121 /** Marker for drm_i915_engine_info structures. */
2122 struct drm_i915_engine_info engines[];
2123};
2124
1922#if defined(__cplusplus) 2125#if defined(__cplusplus)
1923} 2126}
1924#endif 2127#endif
diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h
index 4e455018da65..a5aa7d3ac6a1 100644
--- a/tools/include/uapi/linux/bpf.h
+++ b/tools/include/uapi/linux/bpf.h
@@ -1466,8 +1466,8 @@ union bpf_attr {
1466 * If no cookie has been set yet, generate a new cookie. Once 1466 * If no cookie has been set yet, generate a new cookie. Once
1467 * generated, the socket cookie remains stable for the life of the 1467 * generated, the socket cookie remains stable for the life of the
1468 * socket. This helper can be useful for monitoring per socket 1468 * socket. This helper can be useful for monitoring per socket
1469 * networking traffic statistics as it provides a unique socket 1469 * networking traffic statistics as it provides a global socket
1470 * identifier per namespace. 1470 * identifier that can be assumed unique.
1471 * Return 1471 * Return
1472 * A 8-byte long non-decreasing number on success, or 0 if the 1472 * A 8-byte long non-decreasing number on success, or 0 if the
1473 * socket field is missing inside *skb*. 1473 * socket field is missing inside *skb*.
@@ -1571,8 +1571,11 @@ union bpf_attr {
1571 * but this is only implemented for native XDP (with driver 1571 * but this is only implemented for native XDP (with driver
1572 * support) as of this writing). 1572 * support) as of this writing).
1573 * 1573 *
1574 * All values for *flags* are reserved for future usage, and must 1574 * The lower two bits of *flags* are used as the return code if
1575 * be left at zero. 1575 * the map lookup fails. This is so that the return value can be
1576 * one of the XDP program return codes up to XDP_TX, as chosen by
1577 * the caller. Any higher bits in the *flags* argument must be
1578 * unset.
1576 * 1579 *
1577 * When used to redirect packets to net devices, this helper 1580 * When used to redirect packets to net devices, this helper
1578 * provides a high performance increase over **bpf_redirect**\ (). 1581 * provides a high performance increase over **bpf_redirect**\ ().
diff --git a/tools/include/uapi/linux/if_link.h b/tools/include/uapi/linux/if_link.h
index 7d113a9602f0..4a8c02cafa9a 100644
--- a/tools/include/uapi/linux/if_link.h
+++ b/tools/include/uapi/linux/if_link.h
@@ -695,6 +695,7 @@ enum {
695 IFLA_VF_IB_NODE_GUID, /* VF Infiniband node GUID */ 695 IFLA_VF_IB_NODE_GUID, /* VF Infiniband node GUID */
696 IFLA_VF_IB_PORT_GUID, /* VF Infiniband port GUID */ 696 IFLA_VF_IB_PORT_GUID, /* VF Infiniband port GUID */
697 IFLA_VF_VLAN_LIST, /* nested list of vlans, option for QinQ */ 697 IFLA_VF_VLAN_LIST, /* nested list of vlans, option for QinQ */
698 IFLA_VF_BROADCAST, /* VF broadcast */
698 __IFLA_VF_MAX, 699 __IFLA_VF_MAX,
699}; 700};
700 701
@@ -705,6 +706,10 @@ struct ifla_vf_mac {
705 __u8 mac[32]; /* MAX_ADDR_LEN */ 706 __u8 mac[32]; /* MAX_ADDR_LEN */
706}; 707};
707 708
709struct ifla_vf_broadcast {
710 __u8 broadcast[32];
711};
712
708struct ifla_vf_vlan { 713struct ifla_vf_vlan {
709 __u32 vf; 714 __u32 vf;
710 __u32 vlan; /* 0 - 4095, 0 disables VLAN filter */ 715 __u32 vlan; /* 0 - 4095, 0 disables VLAN filter */
diff --git a/tools/include/uapi/linux/kvm.h b/tools/include/uapi/linux/kvm.h
index c2152f3dd02d..5e3f12d5359e 100644
--- a/tools/include/uapi/linux/kvm.h
+++ b/tools/include/uapi/linux/kvm.h
@@ -116,7 +116,7 @@ struct kvm_irq_level {
116 * ACPI gsi notion of irq. 116 * ACPI gsi notion of irq.
117 * For IA-64 (APIC model) IOAPIC0: irq 0-23; IOAPIC1: irq 24-47.. 117 * For IA-64 (APIC model) IOAPIC0: irq 0-23; IOAPIC1: irq 24-47..
118 * For X86 (standard AT mode) PIC0/1: irq 0-15. IOAPIC0: 0-23.. 118 * For X86 (standard AT mode) PIC0/1: irq 0-15. IOAPIC0: 0-23..
119 * For ARM: See Documentation/virtual/kvm/api.txt 119 * For ARM: See Documentation/virt/kvm/api.txt
120 */ 120 */
121 union { 121 union {
122 __u32 irq; 122 __u32 irq;
@@ -995,6 +995,7 @@ struct kvm_ppc_resize_hpt {
995#define KVM_CAP_ARM_SVE 170 995#define KVM_CAP_ARM_SVE 170
996#define KVM_CAP_ARM_PTRAUTH_ADDRESS 171 996#define KVM_CAP_ARM_PTRAUTH_ADDRESS 171
997#define KVM_CAP_ARM_PTRAUTH_GENERIC 172 997#define KVM_CAP_ARM_PTRAUTH_GENERIC 172
998#define KVM_CAP_PMU_EVENT_FILTER 173
998 999
999#ifdef KVM_CAP_IRQ_ROUTING 1000#ifdef KVM_CAP_IRQ_ROUTING
1000 1001
@@ -1085,7 +1086,7 @@ struct kvm_xen_hvm_config {
1085 * 1086 *
1086 * KVM_IRQFD_FLAG_RESAMPLE indicates resamplefd is valid and specifies 1087 * KVM_IRQFD_FLAG_RESAMPLE indicates resamplefd is valid and specifies
1087 * the irqfd to operate in resampling mode for level triggered interrupt 1088 * the irqfd to operate in resampling mode for level triggered interrupt
1088 * emulation. See Documentation/virtual/kvm/api.txt. 1089 * emulation. See Documentation/virt/kvm/api.txt.
1089 */ 1090 */
1090#define KVM_IRQFD_FLAG_RESAMPLE (1 << 1) 1091#define KVM_IRQFD_FLAG_RESAMPLE (1 << 1)
1091 1092
@@ -1329,6 +1330,8 @@ struct kvm_s390_ucas_mapping {
1329#define KVM_PPC_GET_RMMU_INFO _IOW(KVMIO, 0xb0, struct kvm_ppc_rmmu_info) 1330#define KVM_PPC_GET_RMMU_INFO _IOW(KVMIO, 0xb0, struct kvm_ppc_rmmu_info)
1330/* Available with KVM_CAP_PPC_GET_CPU_CHAR */ 1331/* Available with KVM_CAP_PPC_GET_CPU_CHAR */
1331#define KVM_PPC_GET_CPU_CHAR _IOR(KVMIO, 0xb1, struct kvm_ppc_cpu_char) 1332#define KVM_PPC_GET_CPU_CHAR _IOR(KVMIO, 0xb1, struct kvm_ppc_cpu_char)
1333/* Available with KVM_CAP_PMU_EVENT_FILTER */
1334#define KVM_SET_PMU_EVENT_FILTER _IOW(KVMIO, 0xb2, struct kvm_pmu_event_filter)
1332 1335
1333/* ioctl for vm fd */ 1336/* ioctl for vm fd */
1334#define KVM_CREATE_DEVICE _IOWR(KVMIO, 0xe0, struct kvm_create_device) 1337#define KVM_CREATE_DEVICE _IOWR(KVMIO, 0xe0, struct kvm_create_device)
diff --git a/tools/include/uapi/linux/sched.h b/tools/include/uapi/linux/sched.h
index ed4ee170bee2..b3105ac1381a 100644
--- a/tools/include/uapi/linux/sched.h
+++ b/tools/include/uapi/linux/sched.h
@@ -2,6 +2,8 @@
2#ifndef _UAPI_LINUX_SCHED_H 2#ifndef _UAPI_LINUX_SCHED_H
3#define _UAPI_LINUX_SCHED_H 3#define _UAPI_LINUX_SCHED_H
4 4
5#include <linux/types.h>
6
5/* 7/*
6 * cloning flags: 8 * cloning flags:
7 */ 9 */
@@ -32,6 +34,20 @@
32#define CLONE_IO 0x80000000 /* Clone io context */ 34#define CLONE_IO 0x80000000 /* Clone io context */
33 35
34/* 36/*
37 * Arguments for the clone3 syscall
38 */
39struct clone_args {
40 __aligned_u64 flags;
41 __aligned_u64 pidfd;
42 __aligned_u64 child_tid;
43 __aligned_u64 parent_tid;
44 __aligned_u64 exit_signal;
45 __aligned_u64 stack;
46 __aligned_u64 stack_size;
47 __aligned_u64 tls;
48};
49
50/*
35 * Scheduling policies 51 * Scheduling policies
36 */ 52 */
37#define SCHED_NORMAL 0 53#define SCHED_NORMAL 0
@@ -51,9 +67,21 @@
51#define SCHED_FLAG_RESET_ON_FORK 0x01 67#define SCHED_FLAG_RESET_ON_FORK 0x01
52#define SCHED_FLAG_RECLAIM 0x02 68#define SCHED_FLAG_RECLAIM 0x02
53#define SCHED_FLAG_DL_OVERRUN 0x04 69#define SCHED_FLAG_DL_OVERRUN 0x04
70#define SCHED_FLAG_KEEP_POLICY 0x08
71#define SCHED_FLAG_KEEP_PARAMS 0x10
72#define SCHED_FLAG_UTIL_CLAMP_MIN 0x20
73#define SCHED_FLAG_UTIL_CLAMP_MAX 0x40
74
75#define SCHED_FLAG_KEEP_ALL (SCHED_FLAG_KEEP_POLICY | \
76 SCHED_FLAG_KEEP_PARAMS)
77
78#define SCHED_FLAG_UTIL_CLAMP (SCHED_FLAG_UTIL_CLAMP_MIN | \
79 SCHED_FLAG_UTIL_CLAMP_MAX)
54 80
55#define SCHED_FLAG_ALL (SCHED_FLAG_RESET_ON_FORK | \ 81#define SCHED_FLAG_ALL (SCHED_FLAG_RESET_ON_FORK | \
56 SCHED_FLAG_RECLAIM | \ 82 SCHED_FLAG_RECLAIM | \
57 SCHED_FLAG_DL_OVERRUN) 83 SCHED_FLAG_DL_OVERRUN | \
84 SCHED_FLAG_KEEP_ALL | \
85 SCHED_FLAG_UTIL_CLAMP)
58 86
59#endif /* _UAPI_LINUX_SCHED_H */ 87#endif /* _UAPI_LINUX_SCHED_H */
diff --git a/tools/include/uapi/linux/usbdevice_fs.h b/tools/include/uapi/linux/usbdevice_fs.h
index 964e87217be4..78efe870c2b7 100644
--- a/tools/include/uapi/linux/usbdevice_fs.h
+++ b/tools/include/uapi/linux/usbdevice_fs.h
@@ -76,6 +76,26 @@ struct usbdevfs_connectinfo {
76 unsigned char slow; 76 unsigned char slow;
77}; 77};
78 78
79struct usbdevfs_conninfo_ex {
80 __u32 size; /* Size of the structure from the kernel's */
81 /* point of view. Can be used by userspace */
82 /* to determine how much data can be */
83 /* used/trusted. */
84 __u32 busnum; /* USB bus number, as enumerated by the */
85 /* kernel, the device is connected to. */
86 __u32 devnum; /* Device address on the bus. */
87 __u32 speed; /* USB_SPEED_* constants from ch9.h */
88 __u8 num_ports; /* Number of ports the device is connected */
89 /* to on the way to the root hub. It may */
90 /* be bigger than size of 'ports' array so */
91 /* userspace can detect overflows. */
92 __u8 ports[7]; /* List of ports on the way from the root */
93 /* hub to the device. Current limit in */
94 /* USB specification is 7 tiers (root hub, */
95 /* 5 intermediate hubs, device), which */
96 /* gives at most 6 port entries. */
97};
98
79#define USBDEVFS_URB_SHORT_NOT_OK 0x01 99#define USBDEVFS_URB_SHORT_NOT_OK 0x01
80#define USBDEVFS_URB_ISO_ASAP 0x02 100#define USBDEVFS_URB_ISO_ASAP 0x02
81#define USBDEVFS_URB_BULK_CONTINUATION 0x04 101#define USBDEVFS_URB_BULK_CONTINUATION 0x04
@@ -137,6 +157,7 @@ struct usbdevfs_hub_portinfo {
137#define USBDEVFS_CAP_REAP_AFTER_DISCONNECT 0x10 157#define USBDEVFS_CAP_REAP_AFTER_DISCONNECT 0x10
138#define USBDEVFS_CAP_MMAP 0x20 158#define USBDEVFS_CAP_MMAP 0x20
139#define USBDEVFS_CAP_DROP_PRIVILEGES 0x40 159#define USBDEVFS_CAP_DROP_PRIVILEGES 0x40
160#define USBDEVFS_CAP_CONNINFO_EX 0x80
140 161
141/* USBDEVFS_DISCONNECT_CLAIM flags & struct */ 162/* USBDEVFS_DISCONNECT_CLAIM flags & struct */
142 163
@@ -197,5 +218,10 @@ struct usbdevfs_streams {
197#define USBDEVFS_FREE_STREAMS _IOR('U', 29, struct usbdevfs_streams) 218#define USBDEVFS_FREE_STREAMS _IOR('U', 29, struct usbdevfs_streams)
198#define USBDEVFS_DROP_PRIVILEGES _IOW('U', 30, __u32) 219#define USBDEVFS_DROP_PRIVILEGES _IOW('U', 30, __u32)
199#define USBDEVFS_GET_SPEED _IO('U', 31) 220#define USBDEVFS_GET_SPEED _IO('U', 31)
221/*
222 * Returns struct usbdevfs_conninfo_ex; length is variable to allow
223 * extending size of the data returned.
224 */
225#define USBDEVFS_CONNINFO_EX(len) _IOC(_IOC_READ, 'U', 32, len)
200 226
201#endif /* _UAPI_LINUX_USBDEVICE_FS_H */ 227#endif /* _UAPI_LINUX_USBDEVICE_FS_H */
diff --git a/tools/lib/bpf/btf.c b/tools/lib/bpf/btf.c
index 467224feb43b..d821107f55f9 100644
--- a/tools/lib/bpf/btf.c
+++ b/tools/lib/bpf/btf.c
@@ -1,6 +1,7 @@
1// SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) 1// SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)
2/* Copyright (c) 2018 Facebook */ 2/* Copyright (c) 2018 Facebook */
3 3
4#include <endian.h>
4#include <stdio.h> 5#include <stdio.h>
5#include <stdlib.h> 6#include <stdlib.h>
6#include <string.h> 7#include <string.h>
@@ -419,9 +420,9 @@ done:
419 420
420static bool btf_check_endianness(const GElf_Ehdr *ehdr) 421static bool btf_check_endianness(const GElf_Ehdr *ehdr)
421{ 422{
422#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__ 423#if __BYTE_ORDER == __LITTLE_ENDIAN
423 return ehdr->e_ident[EI_DATA] == ELFDATA2LSB; 424 return ehdr->e_ident[EI_DATA] == ELFDATA2LSB;
424#elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__ 425#elif __BYTE_ORDER == __BIG_ENDIAN
425 return ehdr->e_ident[EI_DATA] == ELFDATA2MSB; 426 return ehdr->e_ident[EI_DATA] == ELFDATA2MSB;
426#else 427#else
427# error "Unrecognized __BYTE_ORDER__" 428# error "Unrecognized __BYTE_ORDER__"
diff --git a/tools/lib/bpf/hashmap.h b/tools/lib/bpf/hashmap.h
index 03748a742146..bae8879cdf58 100644
--- a/tools/lib/bpf/hashmap.h
+++ b/tools/lib/bpf/hashmap.h
@@ -10,6 +10,11 @@
10 10
11#include <stdbool.h> 11#include <stdbool.h>
12#include <stddef.h> 12#include <stddef.h>
13#ifdef __GLIBC__
14#include <bits/wordsize.h>
15#else
16#include <bits/reg.h>
17#endif
13#include "libbpf_internal.h" 18#include "libbpf_internal.h"
14 19
15static inline size_t hash_bits(size_t h, int bits) 20static inline size_t hash_bits(size_t h, int bits)
diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c
index 794dd5064ae8..2b57d7ea7836 100644
--- a/tools/lib/bpf/libbpf.c
+++ b/tools/lib/bpf/libbpf.c
@@ -20,6 +20,7 @@
20#include <inttypes.h> 20#include <inttypes.h>
21#include <string.h> 21#include <string.h>
22#include <unistd.h> 22#include <unistd.h>
23#include <endian.h>
23#include <fcntl.h> 24#include <fcntl.h>
24#include <errno.h> 25#include <errno.h>
25#include <asm/unistd.h> 26#include <asm/unistd.h>
@@ -181,7 +182,6 @@ struct bpf_program {
181 bpf_program_clear_priv_t clear_priv; 182 bpf_program_clear_priv_t clear_priv;
182 183
183 enum bpf_attach_type expected_attach_type; 184 enum bpf_attach_type expected_attach_type;
184 int btf_fd;
185 void *func_info; 185 void *func_info;
186 __u32 func_info_rec_size; 186 __u32 func_info_rec_size;
187 __u32 func_info_cnt; 187 __u32 func_info_cnt;
@@ -312,7 +312,6 @@ void bpf_program__unload(struct bpf_program *prog)
312 prog->instances.nr = -1; 312 prog->instances.nr = -1;
313 zfree(&prog->instances.fds); 313 zfree(&prog->instances.fds);
314 314
315 zclose(prog->btf_fd);
316 zfree(&prog->func_info); 315 zfree(&prog->func_info);
317 zfree(&prog->line_info); 316 zfree(&prog->line_info);
318} 317}
@@ -391,7 +390,6 @@ bpf_program__init(void *data, size_t size, char *section_name, int idx,
391 prog->instances.fds = NULL; 390 prog->instances.fds = NULL;
392 prog->instances.nr = -1; 391 prog->instances.nr = -1;
393 prog->type = BPF_PROG_TYPE_UNSPEC; 392 prog->type = BPF_PROG_TYPE_UNSPEC;
394 prog->btf_fd = -1;
395 393
396 return 0; 394 return 0;
397errout: 395errout:
@@ -612,10 +610,10 @@ errout:
612 610
613static int bpf_object__check_endianness(struct bpf_object *obj) 611static int bpf_object__check_endianness(struct bpf_object *obj)
614{ 612{
615#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__ 613#if __BYTE_ORDER == __LITTLE_ENDIAN
616 if (obj->efile.ehdr.e_ident[EI_DATA] == ELFDATA2LSB) 614 if (obj->efile.ehdr.e_ident[EI_DATA] == ELFDATA2LSB)
617 return 0; 615 return 0;
618#elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__ 616#elif __BYTE_ORDER == __BIG_ENDIAN
619 if (obj->efile.ehdr.e_ident[EI_DATA] == ELFDATA2MSB) 617 if (obj->efile.ehdr.e_ident[EI_DATA] == ELFDATA2MSB)
620 return 0; 618 return 0;
621#else 619#else
@@ -1377,8 +1375,13 @@ static void bpf_object__sanitize_btf(struct bpf_object *obj)
1377 if (!has_datasec && kind == BTF_KIND_VAR) { 1375 if (!has_datasec && kind == BTF_KIND_VAR) {
1378 /* replace VAR with INT */ 1376 /* replace VAR with INT */
1379 t->info = BTF_INFO_ENC(BTF_KIND_INT, 0, 0); 1377 t->info = BTF_INFO_ENC(BTF_KIND_INT, 0, 0);
1380 t->size = sizeof(int); 1378 /*
1381 *(int *)(t+1) = BTF_INT_ENC(0, 0, 32); 1379 * using size = 1 is the safest choice, 4 will be too
1380 * big and cause kernel BTF validation failure if
1381 * original variable took less than 4 bytes
1382 */
1383 t->size = 1;
1384 *(int *)(t+1) = BTF_INT_ENC(0, 0, 8);
1382 } else if (!has_datasec && kind == BTF_KIND_DATASEC) { 1385 } else if (!has_datasec && kind == BTF_KIND_DATASEC) {
1383 /* replace DATASEC with STRUCT */ 1386 /* replace DATASEC with STRUCT */
1384 struct btf_var_secinfo *v = (void *)(t + 1); 1387 struct btf_var_secinfo *v = (void *)(t + 1);
@@ -1500,6 +1503,12 @@ static int bpf_object__sanitize_and_load_btf(struct bpf_object *obj)
1500 BTF_ELF_SEC, err); 1503 BTF_ELF_SEC, err);
1501 btf__free(obj->btf); 1504 btf__free(obj->btf);
1502 obj->btf = NULL; 1505 obj->btf = NULL;
1506 /* btf_ext can't exist without btf, so free it as well */
1507 if (obj->btf_ext) {
1508 btf_ext__free(obj->btf_ext);
1509 obj->btf_ext = NULL;
1510 }
1511
1503 if (bpf_object__is_btf_mandatory(obj)) 1512 if (bpf_object__is_btf_mandatory(obj))
1504 return err; 1513 return err;
1505 } 1514 }
@@ -2276,9 +2285,6 @@ bpf_program_reloc_btf_ext(struct bpf_program *prog, struct bpf_object *obj,
2276 prog->line_info_rec_size = btf_ext__line_info_rec_size(obj->btf_ext); 2285 prog->line_info_rec_size = btf_ext__line_info_rec_size(obj->btf_ext);
2277 } 2286 }
2278 2287
2279 if (!insn_offset)
2280 prog->btf_fd = btf__fd(obj->btf);
2281
2282 return 0; 2288 return 0;
2283} 2289}
2284 2290
@@ -2451,7 +2457,7 @@ load_program(struct bpf_program *prog, struct bpf_insn *insns, int insns_cnt,
2451 char *cp, errmsg[STRERR_BUFSIZE]; 2457 char *cp, errmsg[STRERR_BUFSIZE];
2452 int log_buf_size = BPF_LOG_BUF_SIZE; 2458 int log_buf_size = BPF_LOG_BUF_SIZE;
2453 char *log_buf; 2459 char *log_buf;
2454 int ret; 2460 int btf_fd, ret;
2455 2461
2456 if (!insns || !insns_cnt) 2462 if (!insns || !insns_cnt)
2457 return -EINVAL; 2463 return -EINVAL;
@@ -2466,7 +2472,12 @@ load_program(struct bpf_program *prog, struct bpf_insn *insns, int insns_cnt,
2466 load_attr.license = license; 2472 load_attr.license = license;
2467 load_attr.kern_version = kern_version; 2473 load_attr.kern_version = kern_version;
2468 load_attr.prog_ifindex = prog->prog_ifindex; 2474 load_attr.prog_ifindex = prog->prog_ifindex;
2469 load_attr.prog_btf_fd = prog->btf_fd >= 0 ? prog->btf_fd : 0; 2475 /* if .BTF.ext was loaded, kernel supports associated BTF for prog */
2476 if (prog->obj->btf_ext)
2477 btf_fd = bpf_object__btf_fd(prog->obj);
2478 else
2479 btf_fd = -1;
2480 load_attr.prog_btf_fd = btf_fd >= 0 ? btf_fd : 0;
2470 load_attr.func_info = prog->func_info; 2481 load_attr.func_info = prog->func_info;
2471 load_attr.func_info_rec_size = prog->func_info_rec_size; 2482 load_attr.func_info_rec_size = prog->func_info_rec_size;
2472 load_attr.func_info_cnt = prog->func_info_cnt; 2483 load_attr.func_info_cnt = prog->func_info_cnt;
@@ -4507,13 +4518,13 @@ struct perf_buffer *perf_buffer__new(int map_fd, size_t page_cnt,
4507 const struct perf_buffer_opts *opts) 4518 const struct perf_buffer_opts *opts)
4508{ 4519{
4509 struct perf_buffer_params p = {}; 4520 struct perf_buffer_params p = {};
4510 struct perf_event_attr attr = { 4521 struct perf_event_attr attr = { 0, };
4511 .config = PERF_COUNT_SW_BPF_OUTPUT, 4522
4512 .type = PERF_TYPE_SOFTWARE, 4523 attr.config = PERF_COUNT_SW_BPF_OUTPUT,
4513 .sample_type = PERF_SAMPLE_RAW, 4524 attr.type = PERF_TYPE_SOFTWARE;
4514 .sample_period = 1, 4525 attr.sample_type = PERF_SAMPLE_RAW;
4515 .wakeup_events = 1, 4526 attr.sample_period = 1;
4516 }; 4527 attr.wakeup_events = 1;
4517 4528
4518 p.attr = &attr; 4529 p.attr = &attr;
4519 p.sample_cb = opts ? opts->sample_cb : NULL; 4530 p.sample_cb = opts ? opts->sample_cb : NULL;
@@ -4988,13 +4999,15 @@ int libbpf_num_possible_cpus(void)
4988 static const char *fcpu = "/sys/devices/system/cpu/possible"; 4999 static const char *fcpu = "/sys/devices/system/cpu/possible";
4989 int len = 0, n = 0, il = 0, ir = 0; 5000 int len = 0, n = 0, il = 0, ir = 0;
4990 unsigned int start = 0, end = 0; 5001 unsigned int start = 0, end = 0;
5002 int tmp_cpus = 0;
4991 static int cpus; 5003 static int cpus;
4992 char buf[128]; 5004 char buf[128];
4993 int error = 0; 5005 int error = 0;
4994 int fd = -1; 5006 int fd = -1;
4995 5007
4996 if (cpus > 0) 5008 tmp_cpus = READ_ONCE(cpus);
4997 return cpus; 5009 if (tmp_cpus > 0)
5010 return tmp_cpus;
4998 5011
4999 fd = open(fcpu, O_RDONLY); 5012 fd = open(fcpu, O_RDONLY);
5000 if (fd < 0) { 5013 if (fd < 0) {
@@ -5017,7 +5030,7 @@ int libbpf_num_possible_cpus(void)
5017 } 5030 }
5018 buf[len] = '\0'; 5031 buf[len] = '\0';
5019 5032
5020 for (ir = 0, cpus = 0; ir <= len; ir++) { 5033 for (ir = 0, tmp_cpus = 0; ir <= len; ir++) {
5021 /* Each sub string separated by ',' has format \d+-\d+ or \d+ */ 5034 /* Each sub string separated by ',' has format \d+-\d+ or \d+ */
5022 if (buf[ir] == ',' || buf[ir] == '\0') { 5035 if (buf[ir] == ',' || buf[ir] == '\0') {
5023 buf[ir] = '\0'; 5036 buf[ir] = '\0';
@@ -5029,13 +5042,15 @@ int libbpf_num_possible_cpus(void)
5029 } else if (n == 1) { 5042 } else if (n == 1) {
5030 end = start; 5043 end = start;
5031 } 5044 }
5032 cpus += end - start + 1; 5045 tmp_cpus += end - start + 1;
5033 il = ir + 1; 5046 il = ir + 1;
5034 } 5047 }
5035 } 5048 }
5036 if (cpus <= 0) { 5049 if (tmp_cpus <= 0) {
5037 pr_warning("Invalid #CPUs %d from %s\n", cpus, fcpu); 5050 pr_warning("Invalid #CPUs %d from %s\n", tmp_cpus, fcpu);
5038 return -EINVAL; 5051 return -EINVAL;
5039 } 5052 }
5040 return cpus; 5053
5054 WRITE_ONCE(cpus, tmp_cpus);
5055 return tmp_cpus;
5041} 5056}
diff --git a/tools/lib/bpf/xsk.c b/tools/lib/bpf/xsk.c
index 5007b5d4fd2c..680e63066cf3 100644
--- a/tools/lib/bpf/xsk.c
+++ b/tools/lib/bpf/xsk.c
@@ -317,17 +317,16 @@ static int xsk_load_xdp_prog(struct xsk_socket *xsk)
317 317
318static int xsk_get_max_queues(struct xsk_socket *xsk) 318static int xsk_get_max_queues(struct xsk_socket *xsk)
319{ 319{
320 struct ethtool_channels channels; 320 struct ethtool_channels channels = { .cmd = ETHTOOL_GCHANNELS };
321 struct ifreq ifr; 321 struct ifreq ifr = {};
322 int fd, err, ret; 322 int fd, err, ret;
323 323
324 fd = socket(AF_INET, SOCK_DGRAM, 0); 324 fd = socket(AF_INET, SOCK_DGRAM, 0);
325 if (fd < 0) 325 if (fd < 0)
326 return -errno; 326 return -errno;
327 327
328 channels.cmd = ETHTOOL_GCHANNELS;
329 ifr.ifr_data = (void *)&channels; 328 ifr.ifr_data = (void *)&channels;
330 strncpy(ifr.ifr_name, xsk->ifname, IFNAMSIZ - 1); 329 memcpy(ifr.ifr_name, xsk->ifname, IFNAMSIZ - 1);
331 ifr.ifr_name[IFNAMSIZ - 1] = '\0'; 330 ifr.ifr_name[IFNAMSIZ - 1] = '\0';
332 err = ioctl(fd, SIOCETHTOOL, &ifr); 331 err = ioctl(fd, SIOCETHTOOL, &ifr);
333 if (err && errno != EOPNOTSUPP) { 332 if (err && errno != EOPNOTSUPP) {
@@ -335,7 +334,7 @@ static int xsk_get_max_queues(struct xsk_socket *xsk)
335 goto out; 334 goto out;
336 } 335 }
337 336
338 if (channels.max_combined == 0 || errno == EOPNOTSUPP) 337 if (err || channels.max_combined == 0)
339 /* If the device says it has no channels, then all traffic 338 /* If the device says it has no channels, then all traffic
340 * is sent to a single stream, so max queues = 1. 339 * is sent to a single stream, so max queues = 1.
341 */ 340 */
@@ -517,7 +516,7 @@ int xsk_socket__create(struct xsk_socket **xsk_ptr, const char *ifname,
517 err = -errno; 516 err = -errno;
518 goto out_socket; 517 goto out_socket;
519 } 518 }
520 strncpy(xsk->ifname, ifname, IFNAMSIZ - 1); 519 memcpy(xsk->ifname, ifname, IFNAMSIZ - 1);
521 xsk->ifname[IFNAMSIZ - 1] = '\0'; 520 xsk->ifname[IFNAMSIZ - 1] = '\0';
522 521
523 err = xsk_set_xdp_socket_config(&xsk->config, usr_config); 522 err = xsk_set_xdp_socket_config(&xsk->config, usr_config);
diff --git a/tools/objtool/check.c b/tools/objtool/check.c
index 5f26620f13f5..176f2f084060 100644
--- a/tools/objtool/check.c
+++ b/tools/objtool/check.c
@@ -1946,6 +1946,7 @@ static int validate_branch(struct objtool_file *file, struct symbol *func,
1946 struct alternative *alt; 1946 struct alternative *alt;
1947 struct instruction *insn, *next_insn; 1947 struct instruction *insn, *next_insn;
1948 struct section *sec; 1948 struct section *sec;
1949 u8 visited;
1949 int ret; 1950 int ret;
1950 1951
1951 insn = first; 1952 insn = first;
@@ -1972,12 +1973,12 @@ static int validate_branch(struct objtool_file *file, struct symbol *func,
1972 return 1; 1973 return 1;
1973 } 1974 }
1974 1975
1976 visited = 1 << state.uaccess;
1975 if (insn->visited) { 1977 if (insn->visited) {
1976 if (!insn->hint && !insn_state_match(insn, &state)) 1978 if (!insn->hint && !insn_state_match(insn, &state))
1977 return 1; 1979 return 1;
1978 1980
1979 /* If we were here with AC=0, but now have AC=1, go again */ 1981 if (insn->visited & visited)
1980 if (insn->state.uaccess || !state.uaccess)
1981 return 0; 1982 return 0;
1982 } 1983 }
1983 1984
@@ -2024,7 +2025,7 @@ static int validate_branch(struct objtool_file *file, struct symbol *func,
2024 } else 2025 } else
2025 insn->state = state; 2026 insn->state = state;
2026 2027
2027 insn->visited = true; 2028 insn->visited |= visited;
2028 2029
2029 if (!insn->ignore_alts) { 2030 if (!insn->ignore_alts) {
2030 bool skip_orig = false; 2031 bool skip_orig = false;
diff --git a/tools/objtool/check.h b/tools/objtool/check.h
index b881fafcf55d..6d875ca6fce0 100644
--- a/tools/objtool/check.h
+++ b/tools/objtool/check.h
@@ -33,8 +33,9 @@ struct instruction {
33 unsigned int len; 33 unsigned int len;
34 enum insn_type type; 34 enum insn_type type;
35 unsigned long immediate; 35 unsigned long immediate;
36 bool alt_group, visited, dead_end, ignore, hint, save, restore, ignore_alts; 36 bool alt_group, dead_end, ignore, hint, save, restore, ignore_alts;
37 bool retpoline_safe; 37 bool retpoline_safe;
38 u8 visited;
38 struct symbol *call_dest; 39 struct symbol *call_dest;
39 struct instruction *jump_dest; 40 struct instruction *jump_dest;
40 struct instruction *first_jump_src; 41 struct instruction *first_jump_src;
diff --git a/tools/perf/Documentation/Makefile b/tools/perf/Documentation/Makefile
index 6d148a40551c..adc5a7e44b98 100644
--- a/tools/perf/Documentation/Makefile
+++ b/tools/perf/Documentation/Makefile
@@ -242,7 +242,7 @@ $(OUTPUT)doc.dep : $(wildcard *.txt) build-docdep.perl
242 $(PERL_PATH) ./build-docdep.perl >$@+ $(QUIET_STDERR) && \ 242 $(PERL_PATH) ./build-docdep.perl >$@+ $(QUIET_STDERR) && \
243 mv $@+ $@ 243 mv $@+ $@
244 244
245-include $(OUPTUT)doc.dep 245-include $(OUTPUT)doc.dep
246 246
247_cmds_txt = cmds-ancillaryinterrogators.txt \ 247_cmds_txt = cmds-ancillaryinterrogators.txt \
248 cmds-ancillarymanipulators.txt \ 248 cmds-ancillarymanipulators.txt \
diff --git a/tools/perf/Documentation/perf-script.txt b/tools/perf/Documentation/perf-script.txt
index d4e2e18a5881..caaab28f8400 100644
--- a/tools/perf/Documentation/perf-script.txt
+++ b/tools/perf/Documentation/perf-script.txt
@@ -228,11 +228,11 @@ OPTIONS
228 228
229 With the metric option perf script can compute metrics for 229 With the metric option perf script can compute metrics for
230 sampling periods, similar to perf stat. This requires 230 sampling periods, similar to perf stat. This requires
231 specifying a group with multiple metrics with the :S option 231 specifying a group with multiple events defining metrics with the :S option
232 for perf record. perf will sample on the first event, and 232 for perf record. perf will sample on the first event, and
233 compute metrics for all the events in the group. Please note 233 print computed metrics for all the events in the group. Please note
234 that the metric computed is averaged over the whole sampling 234 that the metric computed is averaged over the whole sampling
235 period, not just for the sample point. 235 period (since the last sample), not just for the sample point.
236 236
237 For sample events it's possible to display misc field with -F +misc option, 237 For sample events it's possible to display misc field with -F +misc option,
238 following letters are displayed for each bit: 238 following letters are displayed for each bit:
@@ -384,7 +384,7 @@ include::itrace.txt[]
384 perf script --time 0%-10%,30%-40% 384 perf script --time 0%-10%,30%-40%
385 385
386--max-blocks:: 386--max-blocks::
387 Set the maximum number of program blocks to print with brstackasm for 387 Set the maximum number of program blocks to print with brstackinsn for
388 each sample. 388 each sample.
389 389
390--reltime:: 390--reltime::
diff --git a/tools/perf/Documentation/perf.data-file-format.txt b/tools/perf/Documentation/perf.data-file-format.txt
index 5f54feb19977..d030c87ed9f5 100644
--- a/tools/perf/Documentation/perf.data-file-format.txt
+++ b/tools/perf/Documentation/perf.data-file-format.txt
@@ -126,7 +126,7 @@ vendor,family,model,stepping. For example: GenuineIntel,6,69,1
126 126
127 HEADER_TOTAL_MEM = 10, 127 HEADER_TOTAL_MEM = 10,
128 128
129An uint64_t with the total memory in bytes. 129An uint64_t with the total memory in kilobytes.
130 130
131 HEADER_CMDLINE = 11, 131 HEADER_CMDLINE = 11,
132 132
diff --git a/tools/perf/arch/s390/util/machine.c b/tools/perf/arch/s390/util/machine.c
index a19690a17291..c8c86a0c9b79 100644
--- a/tools/perf/arch/s390/util/machine.c
+++ b/tools/perf/arch/s390/util/machine.c
@@ -6,8 +6,9 @@
6#include "machine.h" 6#include "machine.h"
7#include "api/fs/fs.h" 7#include "api/fs/fs.h"
8#include "debug.h" 8#include "debug.h"
9#include "symbol.h"
9 10
10int arch__fix_module_text_start(u64 *start, const char *name) 11int arch__fix_module_text_start(u64 *start, u64 *size, const char *name)
11{ 12{
12 u64 m_start = *start; 13 u64 m_start = *start;
13 char path[PATH_MAX]; 14 char path[PATH_MAX];
@@ -17,7 +18,35 @@ int arch__fix_module_text_start(u64 *start, const char *name)
17 if (sysfs__read_ull(path, (unsigned long long *)start) < 0) { 18 if (sysfs__read_ull(path, (unsigned long long *)start) < 0) {
18 pr_debug2("Using module %s start:%#lx\n", path, m_start); 19 pr_debug2("Using module %s start:%#lx\n", path, m_start);
19 *start = m_start; 20 *start = m_start;
21 } else {
22 /* Successful read of the modules segment text start address.
23 * Calculate difference between module start address
24 * in memory and module text segment start address.
25 * For example module load address is 0x3ff8011b000
26 * (from /proc/modules) and module text segment start
27 * address is 0x3ff8011b870 (from file above).
28 *
29 * Adjust the module size and subtract the GOT table
30 * size located at the beginning of the module.
31 */
32 *size -= (*start - m_start);
20 } 33 }
21 34
22 return 0; 35 return 0;
23} 36}
37
38/* On s390 kernel text segment start is located at very low memory addresses,
39 * for example 0x10000. Modules are located at very high memory addresses,
40 * for example 0x3ff xxxx xxxx. The gap between end of kernel text segment
41 * and beginning of first module's text segment is very big.
42 * Therefore do not fill this gap and do not assign it to the kernel dso map.
43 */
44void arch__symbols__fixup_end(struct symbol *p, struct symbol *c)
45{
46 if (strchr(p->name, '[') == NULL && strchr(c->name, '['))
47 /* Last kernel symbol mapped to end of page */
48 p->end = roundup(p->end, page_size);
49 else
50 p->end = c->start;
51 pr_debug4("%s sym:%s end:%#lx\n", __func__, p->name, p->end);
52}
diff --git a/tools/perf/arch/x86/entry/syscalls/syscall_64.tbl b/tools/perf/arch/x86/entry/syscalls/syscall_64.tbl
index b4e6f9e6204a..c29976eca4a8 100644
--- a/tools/perf/arch/x86/entry/syscalls/syscall_64.tbl
+++ b/tools/perf/arch/x86/entry/syscalls/syscall_64.tbl
@@ -355,6 +355,8 @@
355431 common fsconfig __x64_sys_fsconfig 355431 common fsconfig __x64_sys_fsconfig
356432 common fsmount __x64_sys_fsmount 356432 common fsmount __x64_sys_fsmount
357433 common fspick __x64_sys_fspick 357433 common fspick __x64_sys_fspick
358434 common pidfd_open __x64_sys_pidfd_open
359435 common clone3 __x64_sys_clone3/ptregs
358 360
359# 361#
360# x32-specific system call numbers start at 512 to avoid cache impact 362# x32-specific system call numbers start at 512 to avoid cache impact
diff --git a/tools/perf/bench/numa.c b/tools/perf/bench/numa.c
index a640ca7aaada..513cb2f2fa32 100644
--- a/tools/perf/bench/numa.c
+++ b/tools/perf/bench/numa.c
@@ -379,8 +379,10 @@ static u8 *alloc_data(ssize_t bytes0, int map_flags,
379 379
380 /* Allocate and initialize all memory on CPU#0: */ 380 /* Allocate and initialize all memory on CPU#0: */
381 if (init_cpu0) { 381 if (init_cpu0) {
382 orig_mask = bind_to_node(0); 382 int node = numa_node_of_cpu(0);
383 bind_to_memnode(0); 383
384 orig_mask = bind_to_node(node);
385 bind_to_memnode(node);
384 } 386 }
385 387
386 bytes = bytes0 + HPSIZE; 388 bytes = bytes0 + HPSIZE;
diff --git a/tools/perf/builtin-ftrace.c b/tools/perf/builtin-ftrace.c
index 66d5a6658daf..019312810405 100644
--- a/tools/perf/builtin-ftrace.c
+++ b/tools/perf/builtin-ftrace.c
@@ -173,7 +173,7 @@ static int set_tracing_cpumask(struct cpu_map *cpumap)
173 int last_cpu; 173 int last_cpu;
174 174
175 last_cpu = cpu_map__cpu(cpumap, cpumap->nr - 1); 175 last_cpu = cpu_map__cpu(cpumap, cpumap->nr - 1);
176 mask_size = (last_cpu + 3) / 4 + 1; 176 mask_size = last_cpu / 4 + 2; /* one more byte for EOS */
177 mask_size += last_cpu / 32; /* ',' is needed for every 32th cpus */ 177 mask_size += last_cpu / 32; /* ',' is needed for every 32th cpus */
178 178
179 cpumask = malloc(mask_size); 179 cpumask = malloc(mask_size);
diff --git a/tools/perf/builtin-probe.c b/tools/perf/builtin-probe.c
index 6418782951a4..3d0ffd41fb55 100644
--- a/tools/perf/builtin-probe.c
+++ b/tools/perf/builtin-probe.c
@@ -698,6 +698,16 @@ __cmd_probe(int argc, const char **argv)
698 698
699 ret = perf_add_probe_events(params.events, params.nevents); 699 ret = perf_add_probe_events(params.events, params.nevents);
700 if (ret < 0) { 700 if (ret < 0) {
701
702 /*
703 * When perf_add_probe_events() fails it calls
704 * cleanup_perf_probe_events(pevs, npevs), i.e.
705 * cleanup_perf_probe_events(params.events, params.nevents), which
706 * will call clear_perf_probe_event(), so set nevents to zero
707 * to avoid cleanup_params() to call clear_perf_probe_event() again
708 * on the same pevs.
709 */
710 params.nevents = 0;
701 pr_err_with_code(" Error: Failed to add events.", ret); 711 pr_err_with_code(" Error: Failed to add events.", ret);
702 return ret; 712 return ret;
703 } 713 }
diff --git a/tools/perf/builtin-script.c b/tools/perf/builtin-script.c
index 8f24865596af..0140ddb8dd0b 100644
--- a/tools/perf/builtin-script.c
+++ b/tools/perf/builtin-script.c
@@ -1059,7 +1059,7 @@ static int perf_sample__fprintf_brstackinsn(struct perf_sample *sample,
1059 1059
1060 printed += ip__fprintf_sym(ip, thread, x.cpumode, x.cpu, &lastsym, attr, fp); 1060 printed += ip__fprintf_sym(ip, thread, x.cpumode, x.cpu, &lastsym, attr, fp);
1061 if (ip == end) { 1061 if (ip == end) {
1062 printed += ip__fprintf_jump(ip, &br->entries[i], &x, buffer + off, len - off, insn, fp, 1062 printed += ip__fprintf_jump(ip, &br->entries[i], &x, buffer + off, len - off, ++insn, fp,
1063 &total_cycles); 1063 &total_cycles);
1064 if (PRINT_FIELD(SRCCODE)) 1064 if (PRINT_FIELD(SRCCODE))
1065 printed += print_srccode(thread, x.cpumode, ip); 1065 printed += print_srccode(thread, x.cpumode, ip);
diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c
index b55a534b4de0..352cf39d7c2f 100644
--- a/tools/perf/builtin-stat.c
+++ b/tools/perf/builtin-stat.c
@@ -607,7 +607,13 @@ try_again:
607 * group leaders. 607 * group leaders.
608 */ 608 */
609 read_counters(&(struct timespec) { .tv_nsec = t1-t0 }); 609 read_counters(&(struct timespec) { .tv_nsec = t1-t0 });
610 perf_evlist__close(evsel_list); 610
611 /*
612 * We need to keep evsel_list alive, because it's processed
613 * later the evsel_list will be closed after.
614 */
615 if (!STAT_RECORD)
616 perf_evlist__close(evsel_list);
611 617
612 return WEXITSTATUS(status); 618 return WEXITSTATUS(status);
613} 619}
@@ -1997,6 +2003,7 @@ int cmd_stat(int argc, const char **argv)
1997 perf_session__write_header(perf_stat.session, evsel_list, fd, true); 2003 perf_session__write_header(perf_stat.session, evsel_list, fd, true);
1998 } 2004 }
1999 2005
2006 perf_evlist__close(evsel_list);
2000 perf_session__delete(perf_stat.session); 2007 perf_session__delete(perf_stat.session);
2001 } 2008 }
2002 2009
diff --git a/tools/perf/pmu-events/jevents.c b/tools/perf/pmu-events/jevents.c
index 1a91a197cafb..d413761621b0 100644
--- a/tools/perf/pmu-events/jevents.c
+++ b/tools/perf/pmu-events/jevents.c
@@ -453,6 +453,7 @@ static struct fixed {
453 { "inst_retired.any_p", "event=0xc0" }, 453 { "inst_retired.any_p", "event=0xc0" },
454 { "cpu_clk_unhalted.ref", "event=0x0,umask=0x03" }, 454 { "cpu_clk_unhalted.ref", "event=0x0,umask=0x03" },
455 { "cpu_clk_unhalted.thread", "event=0x3c" }, 455 { "cpu_clk_unhalted.thread", "event=0x3c" },
456 { "cpu_clk_unhalted.core", "event=0x3c" },
456 { "cpu_clk_unhalted.thread_any", "event=0x3c,any=1" }, 457 { "cpu_clk_unhalted.thread_any", "event=0x3c,any=1" },
457 { NULL, NULL}, 458 { NULL, NULL},
458}; 459};
diff --git a/tools/perf/trace/beauty/usbdevfs_ioctl.sh b/tools/perf/trace/beauty/usbdevfs_ioctl.sh
index 930b80f422e8..aa597ae53747 100755
--- a/tools/perf/trace/beauty/usbdevfs_ioctl.sh
+++ b/tools/perf/trace/beauty/usbdevfs_ioctl.sh
@@ -3,10 +3,13 @@
3 3
4[ $# -eq 1 ] && header_dir=$1 || header_dir=tools/include/uapi/linux/ 4[ $# -eq 1 ] && header_dir=$1 || header_dir=tools/include/uapi/linux/
5 5
6# also as:
7# #define USBDEVFS_CONNINFO_EX(len) _IOC(_IOC_READ, 'U', 32, len)
8
6printf "static const char *usbdevfs_ioctl_cmds[] = {\n" 9printf "static const char *usbdevfs_ioctl_cmds[] = {\n"
7regex="^#[[:space:]]*define[[:space:]]+USBDEVFS_(\w+)[[:space:]]+_IO[WR]{0,2}\([[:space:]]*'U'[[:space:]]*,[[:space:]]*([[:digit:]]+).*" 10regex="^#[[:space:]]*define[[:space:]]+USBDEVFS_(\w+)(\(\w+\))?[[:space:]]+_IO[CWR]{0,2}\([[:space:]]*(_IOC_\w+,[[:space:]]*)?'U'[[:space:]]*,[[:space:]]*([[:digit:]]+).*"
8egrep $regex ${header_dir}/usbdevice_fs.h | egrep -v 'USBDEVFS_\w+32[[:space:]]' | \ 11egrep "$regex" ${header_dir}/usbdevice_fs.h | egrep -v 'USBDEVFS_\w+32[[:space:]]' | \
9 sed -r "s/$regex/\2 \1/g" | \ 12 sed -r "s/$regex/\4 \1/g" | \
10 sort | xargs printf "\t[%s] = \"%s\",\n" 13 sort | xargs printf "\t[%s] = \"%s\",\n"
11printf "};\n\n" 14printf "};\n\n"
12printf "#if 0\n" 15printf "#if 0\n"
diff --git a/tools/perf/ui/browser.c b/tools/perf/ui/browser.c
index f80c51d53565..d227d74b28f8 100644
--- a/tools/perf/ui/browser.c
+++ b/tools/perf/ui/browser.c
@@ -1,7 +1,8 @@
1// SPDX-License-Identifier: GPL-2.0 1// SPDX-License-Identifier: GPL-2.0
2#include "../string2.h" 2#include "../util/util.h"
3#include "../config.h" 3#include "../util/string2.h"
4#include "../../perf.h" 4#include "../util/config.h"
5#include "../perf.h"
5#include "libslang.h" 6#include "libslang.h"
6#include "ui.h" 7#include "ui.h"
7#include "util.h" 8#include "util.h"
@@ -14,7 +15,7 @@
14#include "browser.h" 15#include "browser.h"
15#include "helpline.h" 16#include "helpline.h"
16#include "keysyms.h" 17#include "keysyms.h"
17#include "../color.h" 18#include "../util/color.h"
18#include <linux/ctype.h> 19#include <linux/ctype.h>
19#include <linux/zalloc.h> 20#include <linux/zalloc.h>
20 21
diff --git a/tools/perf/ui/tui/progress.c b/tools/perf/ui/tui/progress.c
index bc134b82829d..5a24dd3ce4db 100644
--- a/tools/perf/ui/tui/progress.c
+++ b/tools/perf/ui/tui/progress.c
@@ -1,6 +1,6 @@
1// SPDX-License-Identifier: GPL-2.0 1// SPDX-License-Identifier: GPL-2.0
2#include <linux/kernel.h> 2#include <linux/kernel.h>
3#include "../cache.h" 3#include "../../util/cache.h"
4#include "../progress.h" 4#include "../progress.h"
5#include "../libslang.h" 5#include "../libslang.h"
6#include "../ui.h" 6#include "../ui.h"
diff --git a/tools/perf/util/annotate.c b/tools/perf/util/annotate.c
index ac9ad2330f93..163536720149 100644
--- a/tools/perf/util/annotate.c
+++ b/tools/perf/util/annotate.c
@@ -1122,7 +1122,7 @@ static int disasm_line__parse(char *line, const char **namep, char **rawp)
1122 goto out; 1122 goto out;
1123 1123
1124 (*rawp)[0] = tmp; 1124 (*rawp)[0] = tmp;
1125 *rawp = skip_spaces(*rawp); 1125 *rawp = strim(*rawp);
1126 1126
1127 return 0; 1127 return 0;
1128 1128
diff --git a/tools/perf/util/cpumap.c b/tools/perf/util/cpumap.c
index 3acfbe34ebaf..39cce66b4ebc 100644
--- a/tools/perf/util/cpumap.c
+++ b/tools/perf/util/cpumap.c
@@ -751,7 +751,10 @@ size_t cpu_map__snprint_mask(struct cpu_map *map, char *buf, size_t size)
751 unsigned char *bitmap; 751 unsigned char *bitmap;
752 int last_cpu = cpu_map__cpu(map, map->nr - 1); 752 int last_cpu = cpu_map__cpu(map, map->nr - 1);
753 753
754 bitmap = zalloc((last_cpu + 7) / 8); 754 if (buf == NULL)
755 return 0;
756
757 bitmap = zalloc(last_cpu / 8 + 1);
755 if (bitmap == NULL) { 758 if (bitmap == NULL) {
756 buf[0] = '\0'; 759 buf[0] = '\0';
757 return 0; 760 return 0;
diff --git a/tools/perf/util/evsel.c b/tools/perf/util/evsel.c
index ebb46da4dfe5..52459dd5ad0c 100644
--- a/tools/perf/util/evsel.c
+++ b/tools/perf/util/evsel.c
@@ -1291,6 +1291,7 @@ static void perf_evsel__free_id(struct perf_evsel *evsel)
1291 xyarray__delete(evsel->sample_id); 1291 xyarray__delete(evsel->sample_id);
1292 evsel->sample_id = NULL; 1292 evsel->sample_id = NULL;
1293 zfree(&evsel->id); 1293 zfree(&evsel->id);
1294 evsel->ids = 0;
1294} 1295}
1295 1296
1296static void perf_evsel__free_config_terms(struct perf_evsel *evsel) 1297static void perf_evsel__free_config_terms(struct perf_evsel *evsel)
@@ -2077,6 +2078,7 @@ void perf_evsel__close(struct perf_evsel *evsel)
2077 2078
2078 perf_evsel__close_fd(evsel); 2079 perf_evsel__close_fd(evsel);
2079 perf_evsel__free_fd(evsel); 2080 perf_evsel__free_fd(evsel);
2081 perf_evsel__free_id(evsel);
2080} 2082}
2081 2083
2082int perf_evsel__open_per_cpu(struct perf_evsel *evsel, 2084int perf_evsel__open_per_cpu(struct perf_evsel *evsel,
diff --git a/tools/perf/util/header.c b/tools/perf/util/header.c
index c24db7f4909c..1903d7ec9797 100644
--- a/tools/perf/util/header.c
+++ b/tools/perf/util/header.c
@@ -3559,6 +3559,13 @@ int perf_session__read_header(struct perf_session *session)
3559 data->file.path); 3559 data->file.path);
3560 } 3560 }
3561 3561
3562 if (f_header.attr_size == 0) {
3563 pr_err("ERROR: The %s file's attr size field is 0 which is unexpected.\n"
3564 "Was the 'perf record' command properly terminated?\n",
3565 data->file.path);
3566 return -EINVAL;
3567 }
3568
3562 nr_attrs = f_header.attrs.size / f_header.attr_size; 3569 nr_attrs = f_header.attrs.size / f_header.attr_size;
3563 lseek(fd, f_header.attrs.offset, SEEK_SET); 3570 lseek(fd, f_header.attrs.offset, SEEK_SET);
3564 3571
@@ -3639,7 +3646,7 @@ int perf_event__synthesize_attr(struct perf_tool *tool,
3639 size += sizeof(struct perf_event_header); 3646 size += sizeof(struct perf_event_header);
3640 size += ids * sizeof(u64); 3647 size += ids * sizeof(u64);
3641 3648
3642 ev = malloc(size); 3649 ev = zalloc(size);
3643 3650
3644 if (ev == NULL) 3651 if (ev == NULL)
3645 return -ENOMEM; 3652 return -ENOMEM;
@@ -3747,7 +3754,7 @@ int perf_event__process_feature(struct perf_session *session,
3747 return 0; 3754 return 0;
3748 3755
3749 ff.buf = (void *)fe->data; 3756 ff.buf = (void *)fe->data;
3750 ff.size = event->header.size - sizeof(event->header); 3757 ff.size = event->header.size - sizeof(*fe);
3751 ff.ph = &session->header; 3758 ff.ph = &session->header;
3752 3759
3753 if (feat_ops[feat].process(&ff, NULL)) 3760 if (feat_ops[feat].process(&ff, NULL))
diff --git a/tools/perf/util/machine.c b/tools/perf/util/machine.c
index cf826eca3aaf..83b2fbbeeb90 100644
--- a/tools/perf/util/machine.c
+++ b/tools/perf/util/machine.c
@@ -1378,6 +1378,7 @@ static int machine__set_modules_path(struct machine *machine)
1378 return map_groups__set_modules_path_dir(&machine->kmaps, modules_path, 0); 1378 return map_groups__set_modules_path_dir(&machine->kmaps, modules_path, 0);
1379} 1379}
1380int __weak arch__fix_module_text_start(u64 *start __maybe_unused, 1380int __weak arch__fix_module_text_start(u64 *start __maybe_unused,
1381 u64 *size __maybe_unused,
1381 const char *name __maybe_unused) 1382 const char *name __maybe_unused)
1382{ 1383{
1383 return 0; 1384 return 0;
@@ -1389,7 +1390,7 @@ static int machine__create_module(void *arg, const char *name, u64 start,
1389 struct machine *machine = arg; 1390 struct machine *machine = arg;
1390 struct map *map; 1391 struct map *map;
1391 1392
1392 if (arch__fix_module_text_start(&start, name) < 0) 1393 if (arch__fix_module_text_start(&start, &size, name) < 0)
1393 return -1; 1394 return -1;
1394 1395
1395 map = machine__findnew_module_map(machine, start, name); 1396 map = machine__findnew_module_map(machine, start, name);
diff --git a/tools/perf/util/machine.h b/tools/perf/util/machine.h
index f70ab98a7bde..7aa38da26427 100644
--- a/tools/perf/util/machine.h
+++ b/tools/perf/util/machine.h
@@ -222,7 +222,7 @@ struct symbol *machine__find_kernel_symbol_by_name(struct machine *machine,
222 222
223struct map *machine__findnew_module_map(struct machine *machine, u64 start, 223struct map *machine__findnew_module_map(struct machine *machine, u64 start,
224 const char *filename); 224 const char *filename);
225int arch__fix_module_text_start(u64 *start, const char *name); 225int arch__fix_module_text_start(u64 *start, u64 *size, const char *name);
226 226
227int machine__load_kallsyms(struct machine *machine, const char *filename); 227int machine__load_kallsyms(struct machine *machine, const char *filename);
228 228
diff --git a/tools/perf/util/probe-event.c b/tools/perf/util/probe-event.c
index cd1eb73cfe83..8394d48f8b32 100644
--- a/tools/perf/util/probe-event.c
+++ b/tools/perf/util/probe-event.c
@@ -2230,6 +2230,7 @@ void clear_perf_probe_event(struct perf_probe_event *pev)
2230 field = next; 2230 field = next;
2231 } 2231 }
2232 } 2232 }
2233 pev->nargs = 0;
2233 zfree(&pev->args); 2234 zfree(&pev->args);
2234} 2235}
2235 2236
diff --git a/tools/perf/util/session.c b/tools/perf/util/session.c
index d0fd6c614e68..37efa1f43d8b 100644
--- a/tools/perf/util/session.c
+++ b/tools/perf/util/session.c
@@ -36,10 +36,16 @@ static int perf_session__process_compressed_event(struct perf_session *session,
36 void *src; 36 void *src;
37 size_t decomp_size, src_size; 37 size_t decomp_size, src_size;
38 u64 decomp_last_rem = 0; 38 u64 decomp_last_rem = 0;
39 size_t decomp_len = session->header.env.comp_mmap_len; 39 size_t mmap_len, decomp_len = session->header.env.comp_mmap_len;
40 struct decomp *decomp, *decomp_last = session->decomp_last; 40 struct decomp *decomp, *decomp_last = session->decomp_last;
41 41
42 decomp = mmap(NULL, sizeof(struct decomp) + decomp_len, PROT_READ|PROT_WRITE, 42 if (decomp_last) {
43 decomp_last_rem = decomp_last->size - decomp_last->head;
44 decomp_len += decomp_last_rem;
45 }
46
47 mmap_len = sizeof(struct decomp) + decomp_len;
48 decomp = mmap(NULL, mmap_len, PROT_READ|PROT_WRITE,
43 MAP_ANONYMOUS|MAP_PRIVATE, -1, 0); 49 MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
44 if (decomp == MAP_FAILED) { 50 if (decomp == MAP_FAILED) {
45 pr_err("Couldn't allocate memory for decompression\n"); 51 pr_err("Couldn't allocate memory for decompression\n");
@@ -47,10 +53,10 @@ static int perf_session__process_compressed_event(struct perf_session *session,
47 } 53 }
48 54
49 decomp->file_pos = file_offset; 55 decomp->file_pos = file_offset;
56 decomp->mmap_len = mmap_len;
50 decomp->head = 0; 57 decomp->head = 0;
51 58
52 if (decomp_last) { 59 if (decomp_last_rem) {
53 decomp_last_rem = decomp_last->size - decomp_last->head;
54 memcpy(decomp->data, &(decomp_last->data[decomp_last->head]), decomp_last_rem); 60 memcpy(decomp->data, &(decomp_last->data[decomp_last->head]), decomp_last_rem);
55 decomp->size = decomp_last_rem; 61 decomp->size = decomp_last_rem;
56 } 62 }
@@ -61,7 +67,7 @@ static int perf_session__process_compressed_event(struct perf_session *session,
61 decomp_size = zstd_decompress_stream(&(session->zstd_data), src, src_size, 67 decomp_size = zstd_decompress_stream(&(session->zstd_data), src, src_size,
62 &(decomp->data[decomp_last_rem]), decomp_len - decomp_last_rem); 68 &(decomp->data[decomp_last_rem]), decomp_len - decomp_last_rem);
63 if (!decomp_size) { 69 if (!decomp_size) {
64 munmap(decomp, sizeof(struct decomp) + decomp_len); 70 munmap(decomp, mmap_len);
65 pr_err("Couldn't decompress data\n"); 71 pr_err("Couldn't decompress data\n");
66 return -1; 72 return -1;
67 } 73 }
@@ -255,15 +261,15 @@ static void perf_session__delete_threads(struct perf_session *session)
255static void perf_session__release_decomp_events(struct perf_session *session) 261static void perf_session__release_decomp_events(struct perf_session *session)
256{ 262{
257 struct decomp *next, *decomp; 263 struct decomp *next, *decomp;
258 size_t decomp_len; 264 size_t mmap_len;
259 next = session->decomp; 265 next = session->decomp;
260 decomp_len = session->header.env.comp_mmap_len;
261 do { 266 do {
262 decomp = next; 267 decomp = next;
263 if (decomp == NULL) 268 if (decomp == NULL)
264 break; 269 break;
265 next = decomp->next; 270 next = decomp->next;
266 munmap(decomp, decomp_len + sizeof(struct decomp)); 271 mmap_len = decomp->mmap_len;
272 munmap(decomp, mmap_len);
267 } while (1); 273 } while (1);
268} 274}
269 275
diff --git a/tools/perf/util/session.h b/tools/perf/util/session.h
index dd8920b745bc..863dbad87849 100644
--- a/tools/perf/util/session.h
+++ b/tools/perf/util/session.h
@@ -46,6 +46,7 @@ struct perf_session {
46struct decomp { 46struct decomp {
47 struct decomp *next; 47 struct decomp *next;
48 u64 file_pos; 48 u64 file_pos;
49 size_t mmap_len;
49 u64 head; 50 u64 head;
50 size_t size; 51 size_t size;
51 char data[]; 52 char data[];
diff --git a/tools/perf/util/stat-shadow.c b/tools/perf/util/stat-shadow.c
index 656065af4971..accb1bf1cfd8 100644
--- a/tools/perf/util/stat-shadow.c
+++ b/tools/perf/util/stat-shadow.c
@@ -819,7 +819,8 @@ void perf_stat__print_shadow_stats(struct perf_stat_config *config,
819 "stalled cycles per insn", 819 "stalled cycles per insn",
820 ratio); 820 ratio);
821 } else if (have_frontend_stalled) { 821 } else if (have_frontend_stalled) {
822 print_metric(config, ctxp, NULL, NULL, 822 out->new_line(config, ctxp);
823 print_metric(config, ctxp, NULL, "%7.2f ",
823 "stalled cycles per insn", 0); 824 "stalled cycles per insn", 0);
824 } 825 }
825 } else if (perf_evsel__match(evsel, HARDWARE, HW_BRANCH_MISSES)) { 826 } else if (perf_evsel__match(evsel, HARDWARE, HW_BRANCH_MISSES)) {
diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c
index 173f3378aaa0..4efde7879474 100644
--- a/tools/perf/util/symbol.c
+++ b/tools/perf/util/symbol.c
@@ -92,6 +92,11 @@ static int prefix_underscores_count(const char *str)
92 return tail - str; 92 return tail - str;
93} 93}
94 94
95void __weak arch__symbols__fixup_end(struct symbol *p, struct symbol *c)
96{
97 p->end = c->start;
98}
99
95const char * __weak arch__normalize_symbol_name(const char *name) 100const char * __weak arch__normalize_symbol_name(const char *name)
96{ 101{
97 return name; 102 return name;
@@ -218,7 +223,7 @@ void symbols__fixup_end(struct rb_root_cached *symbols)
218 curr = rb_entry(nd, struct symbol, rb_node); 223 curr = rb_entry(nd, struct symbol, rb_node);
219 224
220 if (prev->end == prev->start && prev->end != curr->start) 225 if (prev->end == prev->start && prev->end != curr->start)
221 prev->end = curr->start; 226 arch__symbols__fixup_end(prev, curr);
222 } 227 }
223 228
224 /* Last entry */ 229 /* Last entry */
diff --git a/tools/perf/util/symbol.h b/tools/perf/util/symbol.h
index 12755b42ea93..183f630cb5f1 100644
--- a/tools/perf/util/symbol.h
+++ b/tools/perf/util/symbol.h
@@ -288,6 +288,7 @@ const char *arch__normalize_symbol_name(const char *name);
288#define SYMBOL_A 0 288#define SYMBOL_A 0
289#define SYMBOL_B 1 289#define SYMBOL_B 1
290 290
291void arch__symbols__fixup_end(struct symbol *p, struct symbol *c);
291int arch__compare_symbol_names(const char *namea, const char *nameb); 292int arch__compare_symbol_names(const char *namea, const char *nameb);
292int arch__compare_symbol_names_n(const char *namea, const char *nameb, 293int arch__compare_symbol_names_n(const char *namea, const char *nameb,
293 unsigned int n); 294 unsigned int n);
diff --git a/tools/perf/util/thread.c b/tools/perf/util/thread.c
index 873ab505ca80..590793cc5142 100644
--- a/tools/perf/util/thread.c
+++ b/tools/perf/util/thread.c
@@ -214,14 +214,24 @@ struct comm *thread__comm(const struct thread *thread)
214 214
215struct comm *thread__exec_comm(const struct thread *thread) 215struct comm *thread__exec_comm(const struct thread *thread)
216{ 216{
217 struct comm *comm, *last = NULL; 217 struct comm *comm, *last = NULL, *second_last = NULL;
218 218
219 list_for_each_entry(comm, &thread->comm_list, list) { 219 list_for_each_entry(comm, &thread->comm_list, list) {
220 if (comm->exec) 220 if (comm->exec)
221 return comm; 221 return comm;
222 second_last = last;
222 last = comm; 223 last = comm;
223 } 224 }
224 225
226 /*
227 * 'last' with no start time might be the parent's comm of a synthesized
228 * thread (created by processing a synthesized fork event). For a main
229 * thread, that is very probably wrong. Prefer a later comm to avoid
230 * that case.
231 */
232 if (second_last && !last->start && thread->pid_ == thread->tid)
233 return second_last;
234
225 return last; 235 return last;
226} 236}
227 237
diff --git a/tools/perf/util/zstd.c b/tools/perf/util/zstd.c
index 23bdb9884576..d2202392ffdb 100644
--- a/tools/perf/util/zstd.c
+++ b/tools/perf/util/zstd.c
@@ -99,8 +99,8 @@ size_t zstd_decompress_stream(struct zstd_data *data, void *src, size_t src_size
99 while (input.pos < input.size) { 99 while (input.pos < input.size) {
100 ret = ZSTD_decompressStream(data->dstream, &output, &input); 100 ret = ZSTD_decompressStream(data->dstream, &output, &input);
101 if (ZSTD_isError(ret)) { 101 if (ZSTD_isError(ret)) {
102 pr_err("failed to decompress (B): %ld -> %ld : %s\n", 102 pr_err("failed to decompress (B): %ld -> %ld, dst_size %ld : %s\n",
103 src_size, output.size, ZSTD_getErrorName(ret)); 103 src_size, output.size, dst_size, ZSTD_getErrorName(ret));
104 break; 104 break;
105 } 105 }
106 output.dst = dst + output.pos; 106 output.dst = dst + output.pos;
diff --git a/tools/power/x86/turbostat/Makefile b/tools/power/x86/turbostat/Makefile
index 045f5f7d68ab..13f1e8b9ac52 100644
--- a/tools/power/x86/turbostat/Makefile
+++ b/tools/power/x86/turbostat/Makefile
@@ -9,9 +9,10 @@ ifeq ("$(origin O)", "command line")
9endif 9endif
10 10
11turbostat : turbostat.c 11turbostat : turbostat.c
12override CFLAGS += -Wall -I../../../include 12override CFLAGS += -O2 -Wall -I../../../include
13override CFLAGS += -DMSRHEADER='"../../../../arch/x86/include/asm/msr-index.h"' 13override CFLAGS += -DMSRHEADER='"../../../../arch/x86/include/asm/msr-index.h"'
14override CFLAGS += -DINTEL_FAMILY_HEADER='"../../../../arch/x86/include/asm/intel-family.h"' 14override CFLAGS += -DINTEL_FAMILY_HEADER='"../../../../arch/x86/include/asm/intel-family.h"'
15override CFLAGS += -D_FORTIFY_SOURCE=2
15 16
16%: %.c 17%: %.c
17 @mkdir -p $(BUILD_OUTPUT) 18 @mkdir -p $(BUILD_OUTPUT)
diff --git a/tools/power/x86/turbostat/turbostat.c b/tools/power/x86/turbostat/turbostat.c
index 75fc4fb9901c..b2a86438f074 100644
--- a/tools/power/x86/turbostat/turbostat.c
+++ b/tools/power/x86/turbostat/turbostat.c
@@ -39,7 +39,6 @@ FILE *outf;
39int *fd_percpu; 39int *fd_percpu;
40struct timeval interval_tv = {5, 0}; 40struct timeval interval_tv = {5, 0};
41struct timespec interval_ts = {5, 0}; 41struct timespec interval_ts = {5, 0};
42struct timespec one_msec = {0, 1000000};
43unsigned int num_iterations; 42unsigned int num_iterations;
44unsigned int debug; 43unsigned int debug;
45unsigned int quiet; 44unsigned int quiet;
@@ -60,6 +59,7 @@ unsigned int do_irtl_hsw;
60unsigned int units = 1000000; /* MHz etc */ 59unsigned int units = 1000000; /* MHz etc */
61unsigned int genuine_intel; 60unsigned int genuine_intel;
62unsigned int authentic_amd; 61unsigned int authentic_amd;
62unsigned int hygon_genuine;
63unsigned int max_level, max_extended_level; 63unsigned int max_level, max_extended_level;
64unsigned int has_invariant_tsc; 64unsigned int has_invariant_tsc;
65unsigned int do_nhm_platform_info; 65unsigned int do_nhm_platform_info;
@@ -100,6 +100,7 @@ unsigned int has_hwp_epp; /* IA32_HWP_REQUEST[bits 31:24] */
100unsigned int has_hwp_pkg; /* IA32_HWP_REQUEST_PKG */ 100unsigned int has_hwp_pkg; /* IA32_HWP_REQUEST_PKG */
101unsigned int has_misc_feature_control; 101unsigned int has_misc_feature_control;
102unsigned int first_counter_read = 1; 102unsigned int first_counter_read = 1;
103int ignore_stdin;
103 104
104#define RAPL_PKG (1 << 0) 105#define RAPL_PKG (1 << 0)
105 /* 0x610 MSR_PKG_POWER_LIMIT */ 106 /* 0x610 MSR_PKG_POWER_LIMIT */
@@ -166,6 +167,7 @@ size_t cpu_present_setsize, cpu_affinity_setsize, cpu_subset_size;
166struct thread_data { 167struct thread_data {
167 struct timeval tv_begin; 168 struct timeval tv_begin;
168 struct timeval tv_end; 169 struct timeval tv_end;
170 struct timeval tv_delta;
169 unsigned long long tsc; 171 unsigned long long tsc;
170 unsigned long long aperf; 172 unsigned long long aperf;
171 unsigned long long mperf; 173 unsigned long long mperf;
@@ -506,6 +508,7 @@ unsigned long long bic_enabled = (0xFFFFFFFFFFFFFFFFULL & ~BIC_DISABLED_BY_DEFAU
506unsigned long long bic_present = BIC_USEC | BIC_TOD | BIC_sysfs | BIC_APIC | BIC_X2APIC; 508unsigned long long bic_present = BIC_USEC | BIC_TOD | BIC_sysfs | BIC_APIC | BIC_X2APIC;
507 509
508#define DO_BIC(COUNTER_NAME) (bic_enabled & bic_present & COUNTER_NAME) 510#define DO_BIC(COUNTER_NAME) (bic_enabled & bic_present & COUNTER_NAME)
511#define DO_BIC_READ(COUNTER_NAME) (bic_present & COUNTER_NAME)
509#define ENABLE_BIC(COUNTER_NAME) (bic_enabled |= COUNTER_NAME) 512#define ENABLE_BIC(COUNTER_NAME) (bic_enabled |= COUNTER_NAME)
510#define BIC_PRESENT(COUNTER_BIT) (bic_present |= COUNTER_BIT) 513#define BIC_PRESENT(COUNTER_BIT) (bic_present |= COUNTER_BIT)
511#define BIC_NOT_PRESENT(COUNTER_BIT) (bic_present &= ~COUNTER_BIT) 514#define BIC_NOT_PRESENT(COUNTER_BIT) (bic_present &= ~COUNTER_BIT)
@@ -849,7 +852,6 @@ int dump_counters(struct thread_data *t, struct core_data *c,
849 outp += sprintf(outp, "pc8: %016llX\n", p->pc8); 852 outp += sprintf(outp, "pc8: %016llX\n", p->pc8);
850 outp += sprintf(outp, "pc9: %016llX\n", p->pc9); 853 outp += sprintf(outp, "pc9: %016llX\n", p->pc9);
851 outp += sprintf(outp, "pc10: %016llX\n", p->pc10); 854 outp += sprintf(outp, "pc10: %016llX\n", p->pc10);
852 outp += sprintf(outp, "pc10: %016llX\n", p->pc10);
853 outp += sprintf(outp, "cpu_lpi: %016llX\n", p->cpu_lpi); 855 outp += sprintf(outp, "cpu_lpi: %016llX\n", p->cpu_lpi);
854 outp += sprintf(outp, "sys_lpi: %016llX\n", p->sys_lpi); 856 outp += sprintf(outp, "sys_lpi: %016llX\n", p->sys_lpi);
855 outp += sprintf(outp, "Joules PKG: %0X\n", p->energy_pkg); 857 outp += sprintf(outp, "Joules PKG: %0X\n", p->energy_pkg);
@@ -911,7 +913,7 @@ int format_counters(struct thread_data *t, struct core_data *c,
911 if (DO_BIC(BIC_TOD)) 913 if (DO_BIC(BIC_TOD))
912 outp += sprintf(outp, "%10ld.%06ld\t", t->tv_end.tv_sec, t->tv_end.tv_usec); 914 outp += sprintf(outp, "%10ld.%06ld\t", t->tv_end.tv_sec, t->tv_end.tv_usec);
913 915
914 interval_float = tv_delta.tv_sec + tv_delta.tv_usec/1000000.0; 916 interval_float = t->tv_delta.tv_sec + t->tv_delta.tv_usec/1000000.0;
915 917
916 tsc = t->tsc * tsc_tweak; 918 tsc = t->tsc * tsc_tweak;
917 919
@@ -1287,6 +1289,14 @@ delta_core(struct core_data *new, struct core_data *old)
1287 } 1289 }
1288} 1290}
1289 1291
1292int soft_c1_residency_display(int bic)
1293{
1294 if (!DO_BIC(BIC_CPU_c1) || use_c1_residency_msr)
1295 return 0;
1296
1297 return DO_BIC_READ(bic);
1298}
1299
1290/* 1300/*
1291 * old = new - old 1301 * old = new - old
1292 */ 1302 */
@@ -1309,6 +1319,7 @@ delta_thread(struct thread_data *new, struct thread_data *old,
1309 * over-write old w/ new so we can print end of interval values 1319 * over-write old w/ new so we can print end of interval values
1310 */ 1320 */
1311 1321
1322 timersub(&new->tv_begin, &old->tv_begin, &old->tv_delta);
1312 old->tv_begin = new->tv_begin; 1323 old->tv_begin = new->tv_begin;
1313 old->tv_end = new->tv_end; 1324 old->tv_end = new->tv_end;
1314 1325
@@ -1322,7 +1333,8 @@ delta_thread(struct thread_data *new, struct thread_data *old,
1322 1333
1323 old->c1 = new->c1 - old->c1; 1334 old->c1 = new->c1 - old->c1;
1324 1335
1325 if (DO_BIC(BIC_Avg_MHz) || DO_BIC(BIC_Busy) || DO_BIC(BIC_Bzy_MHz)) { 1336 if (DO_BIC(BIC_Avg_MHz) || DO_BIC(BIC_Busy) || DO_BIC(BIC_Bzy_MHz) ||
1337 soft_c1_residency_display(BIC_Avg_MHz)) {
1326 if ((new->aperf > old->aperf) && (new->mperf > old->mperf)) { 1338 if ((new->aperf > old->aperf) && (new->mperf > old->mperf)) {
1327 old->aperf = new->aperf - old->aperf; 1339 old->aperf = new->aperf - old->aperf;
1328 old->mperf = new->mperf - old->mperf; 1340 old->mperf = new->mperf - old->mperf;
@@ -1404,6 +1416,8 @@ void clear_counters(struct thread_data *t, struct core_data *c, struct pkg_data
1404 t->tv_begin.tv_usec = 0; 1416 t->tv_begin.tv_usec = 0;
1405 t->tv_end.tv_sec = 0; 1417 t->tv_end.tv_sec = 0;
1406 t->tv_end.tv_usec = 0; 1418 t->tv_end.tv_usec = 0;
1419 t->tv_delta.tv_sec = 0;
1420 t->tv_delta.tv_usec = 0;
1407 1421
1408 t->tsc = 0; 1422 t->tsc = 0;
1409 t->aperf = 0; 1423 t->aperf = 0;
@@ -1573,6 +1587,9 @@ void compute_average(struct thread_data *t, struct core_data *c,
1573 1587
1574 for_all_cpus(sum_counters, t, c, p); 1588 for_all_cpus(sum_counters, t, c, p);
1575 1589
1590 /* Use the global time delta for the average. */
1591 average.threads.tv_delta = tv_delta;
1592
1576 average.threads.tsc /= topo.num_cpus; 1593 average.threads.tsc /= topo.num_cpus;
1577 average.threads.aperf /= topo.num_cpus; 1594 average.threads.aperf /= topo.num_cpus;
1578 average.threads.mperf /= topo.num_cpus; 1595 average.threads.mperf /= topo.num_cpus;
@@ -1714,7 +1731,7 @@ void get_apic_id(struct thread_data *t)
1714 if (!DO_BIC(BIC_X2APIC)) 1731 if (!DO_BIC(BIC_X2APIC))
1715 return; 1732 return;
1716 1733
1717 if (authentic_amd) { 1734 if (authentic_amd || hygon_genuine) {
1718 unsigned int topology_extensions; 1735 unsigned int topology_extensions;
1719 1736
1720 if (max_extended_level < 0x8000001e) 1737 if (max_extended_level < 0x8000001e)
@@ -1762,19 +1779,20 @@ int get_counters(struct thread_data *t, struct core_data *c, struct pkg_data *p)
1762 struct msr_counter *mp; 1779 struct msr_counter *mp;
1763 int i; 1780 int i;
1764 1781
1765 gettimeofday(&t->tv_begin, (struct timezone *)NULL);
1766
1767 if (cpu_migrate(cpu)) { 1782 if (cpu_migrate(cpu)) {
1768 fprintf(outf, "Could not migrate to CPU %d\n", cpu); 1783 fprintf(outf, "Could not migrate to CPU %d\n", cpu);
1769 return -1; 1784 return -1;
1770 } 1785 }
1771 1786
1787 gettimeofday(&t->tv_begin, (struct timezone *)NULL);
1788
1772 if (first_counter_read) 1789 if (first_counter_read)
1773 get_apic_id(t); 1790 get_apic_id(t);
1774retry: 1791retry:
1775 t->tsc = rdtsc(); /* we are running on local CPU of interest */ 1792 t->tsc = rdtsc(); /* we are running on local CPU of interest */
1776 1793
1777 if (DO_BIC(BIC_Avg_MHz) || DO_BIC(BIC_Busy) || DO_BIC(BIC_Bzy_MHz)) { 1794 if (DO_BIC(BIC_Avg_MHz) || DO_BIC(BIC_Busy) || DO_BIC(BIC_Bzy_MHz) ||
1795 soft_c1_residency_display(BIC_Avg_MHz)) {
1778 unsigned long long tsc_before, tsc_between, tsc_after, aperf_time, mperf_time; 1796 unsigned long long tsc_before, tsc_between, tsc_after, aperf_time, mperf_time;
1779 1797
1780 /* 1798 /*
@@ -1851,20 +1869,20 @@ retry:
1851 if (!(t->flags & CPU_IS_FIRST_THREAD_IN_CORE)) 1869 if (!(t->flags & CPU_IS_FIRST_THREAD_IN_CORE))
1852 goto done; 1870 goto done;
1853 1871
1854 if (DO_BIC(BIC_CPU_c3)) { 1872 if (DO_BIC(BIC_CPU_c3) || soft_c1_residency_display(BIC_CPU_c3)) {
1855 if (get_msr(cpu, MSR_CORE_C3_RESIDENCY, &c->c3)) 1873 if (get_msr(cpu, MSR_CORE_C3_RESIDENCY, &c->c3))
1856 return -6; 1874 return -6;
1857 } 1875 }
1858 1876
1859 if (DO_BIC(BIC_CPU_c6) && !do_knl_cstates) { 1877 if ((DO_BIC(BIC_CPU_c6) || soft_c1_residency_display(BIC_CPU_c6)) && !do_knl_cstates) {
1860 if (get_msr(cpu, MSR_CORE_C6_RESIDENCY, &c->c6)) 1878 if (get_msr(cpu, MSR_CORE_C6_RESIDENCY, &c->c6))
1861 return -7; 1879 return -7;
1862 } else if (do_knl_cstates) { 1880 } else if (do_knl_cstates || soft_c1_residency_display(BIC_CPU_c6)) {
1863 if (get_msr(cpu, MSR_KNL_CORE_C6_RESIDENCY, &c->c6)) 1881 if (get_msr(cpu, MSR_KNL_CORE_C6_RESIDENCY, &c->c6))
1864 return -7; 1882 return -7;
1865 } 1883 }
1866 1884
1867 if (DO_BIC(BIC_CPU_c7)) 1885 if (DO_BIC(BIC_CPU_c7) || soft_c1_residency_display(BIC_CPU_c7))
1868 if (get_msr(cpu, MSR_CORE_C7_RESIDENCY, &c->c7)) 1886 if (get_msr(cpu, MSR_CORE_C7_RESIDENCY, &c->c7))
1869 return -8; 1887 return -8;
1870 1888
@@ -2912,6 +2930,7 @@ int snapshot_cpu_lpi_us(void)
2912 if (retval != 1) { 2930 if (retval != 1) {
2913 fprintf(stderr, "Disabling Low Power Idle CPU output\n"); 2931 fprintf(stderr, "Disabling Low Power Idle CPU output\n");
2914 BIC_NOT_PRESENT(BIC_CPU_LPI); 2932 BIC_NOT_PRESENT(BIC_CPU_LPI);
2933 fclose(fp);
2915 return -1; 2934 return -1;
2916 } 2935 }
2917 2936
@@ -2938,6 +2957,7 @@ int snapshot_sys_lpi_us(void)
2938 if (retval != 1) { 2957 if (retval != 1) {
2939 fprintf(stderr, "Disabling Low Power Idle System output\n"); 2958 fprintf(stderr, "Disabling Low Power Idle System output\n");
2940 BIC_NOT_PRESENT(BIC_SYS_LPI); 2959 BIC_NOT_PRESENT(BIC_SYS_LPI);
2960 fclose(fp);
2941 return -1; 2961 return -1;
2942 } 2962 }
2943 fclose(fp); 2963 fclose(fp);
@@ -2985,8 +3005,6 @@ static void signal_handler (int signal)
2985 fprintf(stderr, "SIGUSR1\n"); 3005 fprintf(stderr, "SIGUSR1\n");
2986 break; 3006 break;
2987 } 3007 }
2988 /* make sure this manually-invoked interval is at least 1ms long */
2989 nanosleep(&one_msec, NULL);
2990} 3008}
2991 3009
2992void setup_signal_handler(void) 3010void setup_signal_handler(void)
@@ -3005,29 +3023,38 @@ void setup_signal_handler(void)
3005 3023
3006void do_sleep(void) 3024void do_sleep(void)
3007{ 3025{
3008 struct timeval select_timeout; 3026 struct timeval tout;
3027 struct timespec rest;
3009 fd_set readfds; 3028 fd_set readfds;
3010 int retval; 3029 int retval;
3011 3030
3012 FD_ZERO(&readfds); 3031 FD_ZERO(&readfds);
3013 FD_SET(0, &readfds); 3032 FD_SET(0, &readfds);
3014 3033
3015 if (!isatty(fileno(stdin))) { 3034 if (ignore_stdin) {
3016 nanosleep(&interval_ts, NULL); 3035 nanosleep(&interval_ts, NULL);
3017 return; 3036 return;
3018 } 3037 }
3019 3038
3020 select_timeout = interval_tv; 3039 tout = interval_tv;
3021 retval = select(1, &readfds, NULL, NULL, &select_timeout); 3040 retval = select(1, &readfds, NULL, NULL, &tout);
3022 3041
3023 if (retval == 1) { 3042 if (retval == 1) {
3024 switch (getc(stdin)) { 3043 switch (getc(stdin)) {
3025 case 'q': 3044 case 'q':
3026 exit_requested = 1; 3045 exit_requested = 1;
3027 break; 3046 break;
3047 case EOF:
3048 /*
3049 * 'stdin' is a pipe closed on the other end. There
3050 * won't be any further input.
3051 */
3052 ignore_stdin = 1;
3053 /* Sleep the rest of the time */
3054 rest.tv_sec = (tout.tv_sec + tout.tv_usec / 1000000);
3055 rest.tv_nsec = (tout.tv_usec % 1000000) * 1000;
3056 nanosleep(&rest, NULL);
3028 } 3057 }
3029 /* make sure this manually-invoked interval is at least 1ms long */
3030 nanosleep(&one_msec, NULL);
3031 } 3058 }
3032} 3059}
3033 3060
@@ -3209,6 +3236,7 @@ int probe_nhm_msrs(unsigned int family, unsigned int model)
3209 break; 3236 break;
3210 case INTEL_FAM6_HASWELL_CORE: /* HSW */ 3237 case INTEL_FAM6_HASWELL_CORE: /* HSW */
3211 case INTEL_FAM6_HASWELL_X: /* HSX */ 3238 case INTEL_FAM6_HASWELL_X: /* HSX */
3239 case INTEL_FAM6_HASWELL_ULT: /* HSW */
3212 case INTEL_FAM6_HASWELL_GT3E: /* HSW */ 3240 case INTEL_FAM6_HASWELL_GT3E: /* HSW */
3213 case INTEL_FAM6_BROADWELL_CORE: /* BDW */ 3241 case INTEL_FAM6_BROADWELL_CORE: /* BDW */
3214 case INTEL_FAM6_BROADWELL_GT3E: /* BDW */ 3242 case INTEL_FAM6_BROADWELL_GT3E: /* BDW */
@@ -3405,6 +3433,7 @@ int has_config_tdp(unsigned int family, unsigned int model)
3405 case INTEL_FAM6_IVYBRIDGE: /* IVB */ 3433 case INTEL_FAM6_IVYBRIDGE: /* IVB */
3406 case INTEL_FAM6_HASWELL_CORE: /* HSW */ 3434 case INTEL_FAM6_HASWELL_CORE: /* HSW */
3407 case INTEL_FAM6_HASWELL_X: /* HSX */ 3435 case INTEL_FAM6_HASWELL_X: /* HSX */
3436 case INTEL_FAM6_HASWELL_ULT: /* HSW */
3408 case INTEL_FAM6_HASWELL_GT3E: /* HSW */ 3437 case INTEL_FAM6_HASWELL_GT3E: /* HSW */
3409 case INTEL_FAM6_BROADWELL_CORE: /* BDW */ 3438 case INTEL_FAM6_BROADWELL_CORE: /* BDW */
3410 case INTEL_FAM6_BROADWELL_GT3E: /* BDW */ 3439 case INTEL_FAM6_BROADWELL_GT3E: /* BDW */
@@ -3803,6 +3832,7 @@ double get_tdp_amd(unsigned int family)
3803{ 3832{
3804 switch (family) { 3833 switch (family) {
3805 case 0x17: 3834 case 0x17:
3835 case 0x18:
3806 default: 3836 default:
3807 /* This is the max stock TDP of HEDT/Server Fam17h chips */ 3837 /* This is the max stock TDP of HEDT/Server Fam17h chips */
3808 return 250.0; 3838 return 250.0;
@@ -3841,6 +3871,7 @@ void rapl_probe_intel(unsigned int family, unsigned int model)
3841 case INTEL_FAM6_SANDYBRIDGE: 3871 case INTEL_FAM6_SANDYBRIDGE:
3842 case INTEL_FAM6_IVYBRIDGE: 3872 case INTEL_FAM6_IVYBRIDGE:
3843 case INTEL_FAM6_HASWELL_CORE: /* HSW */ 3873 case INTEL_FAM6_HASWELL_CORE: /* HSW */
3874 case INTEL_FAM6_HASWELL_ULT: /* HSW */
3844 case INTEL_FAM6_HASWELL_GT3E: /* HSW */ 3875 case INTEL_FAM6_HASWELL_GT3E: /* HSW */
3845 case INTEL_FAM6_BROADWELL_CORE: /* BDW */ 3876 case INTEL_FAM6_BROADWELL_CORE: /* BDW */
3846 case INTEL_FAM6_BROADWELL_GT3E: /* BDW */ 3877 case INTEL_FAM6_BROADWELL_GT3E: /* BDW */
@@ -3982,6 +4013,7 @@ void rapl_probe_amd(unsigned int family, unsigned int model)
3982 4013
3983 switch (family) { 4014 switch (family) {
3984 case 0x17: /* Zen, Zen+ */ 4015 case 0x17: /* Zen, Zen+ */
4016 case 0x18: /* Hygon Dhyana */
3985 do_rapl = RAPL_AMD_F17H | RAPL_PER_CORE_ENERGY; 4017 do_rapl = RAPL_AMD_F17H | RAPL_PER_CORE_ENERGY;
3986 if (rapl_joules) { 4018 if (rapl_joules) {
3987 BIC_PRESENT(BIC_Pkg_J); 4019 BIC_PRESENT(BIC_Pkg_J);
@@ -4002,7 +4034,7 @@ void rapl_probe_amd(unsigned int family, unsigned int model)
4002 rapl_energy_units = ldexp(1.0, -(msr >> 8 & 0x1f)); 4034 rapl_energy_units = ldexp(1.0, -(msr >> 8 & 0x1f));
4003 rapl_power_units = ldexp(1.0, -(msr & 0xf)); 4035 rapl_power_units = ldexp(1.0, -(msr & 0xf));
4004 4036
4005 tdp = get_tdp_amd(model); 4037 tdp = get_tdp_amd(family);
4006 4038
4007 rapl_joule_counter_range = 0xFFFFFFFF * rapl_energy_units / tdp; 4039 rapl_joule_counter_range = 0xFFFFFFFF * rapl_energy_units / tdp;
4008 if (!quiet) 4040 if (!quiet)
@@ -4018,7 +4050,7 @@ void rapl_probe(unsigned int family, unsigned int model)
4018{ 4050{
4019 if (genuine_intel) 4051 if (genuine_intel)
4020 rapl_probe_intel(family, model); 4052 rapl_probe_intel(family, model);
4021 if (authentic_amd) 4053 if (authentic_amd || hygon_genuine)
4022 rapl_probe_amd(family, model); 4054 rapl_probe_amd(family, model);
4023} 4055}
4024 4056
@@ -4032,6 +4064,7 @@ void perf_limit_reasons_probe(unsigned int family, unsigned int model)
4032 4064
4033 switch (model) { 4065 switch (model) {
4034 case INTEL_FAM6_HASWELL_CORE: /* HSW */ 4066 case INTEL_FAM6_HASWELL_CORE: /* HSW */
4067 case INTEL_FAM6_HASWELL_ULT: /* HSW */
4035 case INTEL_FAM6_HASWELL_GT3E: /* HSW */ 4068 case INTEL_FAM6_HASWELL_GT3E: /* HSW */
4036 do_gfx_perf_limit_reasons = 1; 4069 do_gfx_perf_limit_reasons = 1;
4037 case INTEL_FAM6_HASWELL_X: /* HSX */ 4070 case INTEL_FAM6_HASWELL_X: /* HSX */
@@ -4251,6 +4284,7 @@ int has_snb_msrs(unsigned int family, unsigned int model)
4251 case INTEL_FAM6_IVYBRIDGE_X: /* IVB Xeon */ 4284 case INTEL_FAM6_IVYBRIDGE_X: /* IVB Xeon */
4252 case INTEL_FAM6_HASWELL_CORE: /* HSW */ 4285 case INTEL_FAM6_HASWELL_CORE: /* HSW */
4253 case INTEL_FAM6_HASWELL_X: /* HSW */ 4286 case INTEL_FAM6_HASWELL_X: /* HSW */
4287 case INTEL_FAM6_HASWELL_ULT: /* HSW */
4254 case INTEL_FAM6_HASWELL_GT3E: /* HSW */ 4288 case INTEL_FAM6_HASWELL_GT3E: /* HSW */
4255 case INTEL_FAM6_BROADWELL_CORE: /* BDW */ 4289 case INTEL_FAM6_BROADWELL_CORE: /* BDW */
4256 case INTEL_FAM6_BROADWELL_GT3E: /* BDW */ 4290 case INTEL_FAM6_BROADWELL_GT3E: /* BDW */
@@ -4267,7 +4301,7 @@ int has_snb_msrs(unsigned int family, unsigned int model)
4267} 4301}
4268 4302
4269/* 4303/*
4270 * HSW adds support for additional MSRs: 4304 * HSW ULT added support for C8/C9/C10 MSRs:
4271 * 4305 *
4272 * MSR_PKG_C8_RESIDENCY 0x00000630 4306 * MSR_PKG_C8_RESIDENCY 0x00000630
4273 * MSR_PKG_C9_RESIDENCY 0x00000631 4307 * MSR_PKG_C9_RESIDENCY 0x00000631
@@ -4278,13 +4312,13 @@ int has_snb_msrs(unsigned int family, unsigned int model)
4278 * MSR_PKGC10_IRTL 0x00000635 4312 * MSR_PKGC10_IRTL 0x00000635
4279 * 4313 *
4280 */ 4314 */
4281int has_hsw_msrs(unsigned int family, unsigned int model) 4315int has_c8910_msrs(unsigned int family, unsigned int model)
4282{ 4316{
4283 if (!genuine_intel) 4317 if (!genuine_intel)
4284 return 0; 4318 return 0;
4285 4319
4286 switch (model) { 4320 switch (model) {
4287 case INTEL_FAM6_HASWELL_CORE: 4321 case INTEL_FAM6_HASWELL_ULT: /* HSW */
4288 case INTEL_FAM6_BROADWELL_CORE: /* BDW */ 4322 case INTEL_FAM6_BROADWELL_CORE: /* BDW */
4289 case INTEL_FAM6_SKYLAKE_MOBILE: /* SKL */ 4323 case INTEL_FAM6_SKYLAKE_MOBILE: /* SKL */
4290 case INTEL_FAM6_CANNONLAKE_MOBILE: /* CNL */ 4324 case INTEL_FAM6_CANNONLAKE_MOBILE: /* CNL */
@@ -4568,9 +4602,6 @@ unsigned int intel_model_duplicates(unsigned int model)
4568 case INTEL_FAM6_XEON_PHI_KNM: 4602 case INTEL_FAM6_XEON_PHI_KNM:
4569 return INTEL_FAM6_XEON_PHI_KNL; 4603 return INTEL_FAM6_XEON_PHI_KNL;
4570 4604
4571 case INTEL_FAM6_HASWELL_ULT:
4572 return INTEL_FAM6_HASWELL_CORE;
4573
4574 case INTEL_FAM6_BROADWELL_X: 4605 case INTEL_FAM6_BROADWELL_X:
4575 case INTEL_FAM6_BROADWELL_XEON_D: /* BDX-DE */ 4606 case INTEL_FAM6_BROADWELL_XEON_D: /* BDX-DE */
4576 return INTEL_FAM6_BROADWELL_X; 4607 return INTEL_FAM6_BROADWELL_X;
@@ -4582,7 +4613,11 @@ unsigned int intel_model_duplicates(unsigned int model)
4582 return INTEL_FAM6_SKYLAKE_MOBILE; 4613 return INTEL_FAM6_SKYLAKE_MOBILE;
4583 4614
4584 case INTEL_FAM6_ICELAKE_MOBILE: 4615 case INTEL_FAM6_ICELAKE_MOBILE:
4616 case INTEL_FAM6_ICELAKE_NNPI:
4585 return INTEL_FAM6_CANNONLAKE_MOBILE; 4617 return INTEL_FAM6_CANNONLAKE_MOBILE;
4618
4619 case INTEL_FAM6_ATOM_TREMONT_X:
4620 return INTEL_FAM6_ATOM_GOLDMONT_X;
4586 } 4621 }
4587 return model; 4622 return model;
4588} 4623}
@@ -4600,6 +4635,8 @@ void process_cpuid()
4600 genuine_intel = 1; 4635 genuine_intel = 1;
4601 else if (ebx == 0x68747541 && ecx == 0x444d4163 && edx == 0x69746e65) 4636 else if (ebx == 0x68747541 && ecx == 0x444d4163 && edx == 0x69746e65)
4602 authentic_amd = 1; 4637 authentic_amd = 1;
4638 else if (ebx == 0x6f677948 && ecx == 0x656e6975 && edx == 0x6e65476e)
4639 hygon_genuine = 1;
4603 4640
4604 if (!quiet) 4641 if (!quiet)
4605 fprintf(outf, "CPUID(0): %.4s%.4s%.4s ", 4642 fprintf(outf, "CPUID(0): %.4s%.4s%.4s ",
@@ -4820,12 +4857,12 @@ void process_cpuid()
4820 BIC_NOT_PRESENT(BIC_CPU_c7); 4857 BIC_NOT_PRESENT(BIC_CPU_c7);
4821 BIC_NOT_PRESENT(BIC_Pkgpc7); 4858 BIC_NOT_PRESENT(BIC_Pkgpc7);
4822 } 4859 }
4823 if (has_hsw_msrs(family, model)) { 4860 if (has_c8910_msrs(family, model)) {
4824 BIC_PRESENT(BIC_Pkgpc8); 4861 BIC_PRESENT(BIC_Pkgpc8);
4825 BIC_PRESENT(BIC_Pkgpc9); 4862 BIC_PRESENT(BIC_Pkgpc9);
4826 BIC_PRESENT(BIC_Pkgpc10); 4863 BIC_PRESENT(BIC_Pkgpc10);
4827 } 4864 }
4828 do_irtl_hsw = has_hsw_msrs(family, model); 4865 do_irtl_hsw = has_c8910_msrs(family, model);
4829 if (has_skl_msrs(family, model)) { 4866 if (has_skl_msrs(family, model)) {
4830 BIC_PRESENT(BIC_Totl_c0); 4867 BIC_PRESENT(BIC_Totl_c0);
4831 BIC_PRESENT(BIC_Any_c0); 4868 BIC_PRESENT(BIC_Any_c0);
@@ -5123,7 +5160,7 @@ int initialize_counters(int cpu_id)
5123 5160
5124void allocate_output_buffer() 5161void allocate_output_buffer()
5125{ 5162{
5126 output_buffer = calloc(1, (1 + topo.num_cpus) * 1024); 5163 output_buffer = calloc(1, (1 + topo.num_cpus) * 2048);
5127 outp = output_buffer; 5164 outp = output_buffer;
5128 if (outp == NULL) 5165 if (outp == NULL)
5129 err(-1, "calloc output buffer"); 5166 err(-1, "calloc output buffer");
@@ -5269,7 +5306,7 @@ int get_and_dump_counters(void)
5269} 5306}
5270 5307
5271void print_version() { 5308void print_version() {
5272 fprintf(outf, "turbostat version 19.03.20" 5309 fprintf(outf, "turbostat version 19.08.31"
5273 " - Len Brown <lenb@kernel.org>\n"); 5310 " - Len Brown <lenb@kernel.org>\n");
5274} 5311}
5275 5312
diff --git a/tools/power/x86/x86_energy_perf_policy/Makefile b/tools/power/x86/x86_energy_perf_policy/Makefile
index 1fdeef864e7c..666b325a62a2 100644
--- a/tools/power/x86/x86_energy_perf_policy/Makefile
+++ b/tools/power/x86/x86_energy_perf_policy/Makefile
@@ -9,8 +9,9 @@ ifeq ("$(origin O)", "command line")
9endif 9endif
10 10
11x86_energy_perf_policy : x86_energy_perf_policy.c 11x86_energy_perf_policy : x86_energy_perf_policy.c
12override CFLAGS += -Wall -I../../../include 12override CFLAGS += -O2 -Wall -I../../../include
13override CFLAGS += -DMSRHEADER='"../../../../arch/x86/include/asm/msr-index.h"' 13override CFLAGS += -DMSRHEADER='"../../../../arch/x86/include/asm/msr-index.h"'
14override CFLAGS += -D_FORTIFY_SOURCE=2
14 15
15%: %.c 16%: %.c
16 @mkdir -p $(BUILD_OUTPUT) 17 @mkdir -p $(BUILD_OUTPUT)
diff --git a/tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.8 b/tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.8
index 17db1c3af4d0..78c6361898b1 100644
--- a/tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.8
+++ b/tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.8
@@ -40,7 +40,7 @@ in the same processor package.
40Hardware P-States (HWP) are effectively an expansion of hardware 40Hardware P-States (HWP) are effectively an expansion of hardware
41P-state control from the opportunistic turbo-mode P-state range 41P-state control from the opportunistic turbo-mode P-state range
42to include the entire range of available P-states. 42to include the entire range of available P-states.
43On Broadwell Xeon, the initial HWP implementation, EBP influenced HWP. 43On Broadwell Xeon, the initial HWP implementation, EPB influenced HWP.
44That influence was removed in subsequent generations, 44That influence was removed in subsequent generations,
45where it was moved to the 45where it was moved to the
46Energy_Performance_Preference (EPP) field in 46Energy_Performance_Preference (EPP) field in
diff --git a/tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c b/tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c
index 34a796b303fe..3fe1eed900d4 100644
--- a/tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c
+++ b/tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c
@@ -545,7 +545,7 @@ void cmdline(int argc, char **argv)
545 545
546 progname = argv[0]; 546 progname = argv[0];
547 547
548 while ((opt = getopt_long_only(argc, argv, "+a:c:dD:E:e:f:m:M:rt:u:vw", 548 while ((opt = getopt_long_only(argc, argv, "+a:c:dD:E:e:f:m:M:rt:u:vw:",
549 long_options, &option_index)) != -1) { 549 long_options, &option_index)) != -1) {
550 switch (opt) { 550 switch (opt) {
551 case 'a': 551 case 'a':
@@ -1259,6 +1259,15 @@ void probe_dev_msr(void)
1259 if (system("/sbin/modprobe msr > /dev/null 2>&1")) 1259 if (system("/sbin/modprobe msr > /dev/null 2>&1"))
1260 err(-5, "no /dev/cpu/0/msr, Try \"# modprobe msr\" "); 1260 err(-5, "no /dev/cpu/0/msr, Try \"# modprobe msr\" ");
1261} 1261}
1262
1263static void get_cpuid_or_exit(unsigned int leaf,
1264 unsigned int *eax, unsigned int *ebx,
1265 unsigned int *ecx, unsigned int *edx)
1266{
1267 if (!__get_cpuid(leaf, eax, ebx, ecx, edx))
1268 errx(1, "Processor not supported\n");
1269}
1270
1262/* 1271/*
1263 * early_cpuid() 1272 * early_cpuid()
1264 * initialize turbo_is_enabled, has_hwp, has_epb 1273 * initialize turbo_is_enabled, has_hwp, has_epb
@@ -1266,15 +1275,10 @@ void probe_dev_msr(void)
1266 */ 1275 */
1267void early_cpuid(void) 1276void early_cpuid(void)
1268{ 1277{
1269 unsigned int eax, ebx, ecx, edx, max_level; 1278 unsigned int eax, ebx, ecx, edx;
1270 unsigned int fms, family, model; 1279 unsigned int fms, family, model;
1271 1280
1272 __get_cpuid(0, &max_level, &ebx, &ecx, &edx); 1281 get_cpuid_or_exit(1, &fms, &ebx, &ecx, &edx);
1273
1274 if (max_level < 6)
1275 errx(1, "Processor not supported\n");
1276
1277 __get_cpuid(1, &fms, &ebx, &ecx, &edx);
1278 family = (fms >> 8) & 0xf; 1282 family = (fms >> 8) & 0xf;
1279 model = (fms >> 4) & 0xf; 1283 model = (fms >> 4) & 0xf;
1280 if (family == 6 || family == 0xf) 1284 if (family == 6 || family == 0xf)
@@ -1288,7 +1292,7 @@ void early_cpuid(void)
1288 bdx_highest_ratio = msr & 0xFF; 1292 bdx_highest_ratio = msr & 0xFF;
1289 } 1293 }
1290 1294
1291 __get_cpuid(0x6, &eax, &ebx, &ecx, &edx); 1295 get_cpuid_or_exit(0x6, &eax, &ebx, &ecx, &edx);
1292 turbo_is_enabled = (eax >> 1) & 1; 1296 turbo_is_enabled = (eax >> 1) & 1;
1293 has_hwp = (eax >> 7) & 1; 1297 has_hwp = (eax >> 7) & 1;
1294 has_epb = (ecx >> 3) & 1; 1298 has_epb = (ecx >> 3) & 1;
@@ -1306,7 +1310,7 @@ void parse_cpuid(void)
1306 1310
1307 eax = ebx = ecx = edx = 0; 1311 eax = ebx = ecx = edx = 0;
1308 1312
1309 __get_cpuid(0, &max_level, &ebx, &ecx, &edx); 1313 get_cpuid_or_exit(0, &max_level, &ebx, &ecx, &edx);
1310 1314
1311 if (ebx == 0x756e6547 && edx == 0x49656e69 && ecx == 0x6c65746e) 1315 if (ebx == 0x756e6547 && edx == 0x49656e69 && ecx == 0x6c65746e)
1312 genuine_intel = 1; 1316 genuine_intel = 1;
@@ -1315,7 +1319,7 @@ void parse_cpuid(void)
1315 fprintf(stderr, "CPUID(0): %.4s%.4s%.4s ", 1319 fprintf(stderr, "CPUID(0): %.4s%.4s%.4s ",
1316 (char *)&ebx, (char *)&edx, (char *)&ecx); 1320 (char *)&ebx, (char *)&edx, (char *)&ecx);
1317 1321
1318 __get_cpuid(1, &fms, &ebx, &ecx, &edx); 1322 get_cpuid_or_exit(1, &fms, &ebx, &ecx, &edx);
1319 family = (fms >> 8) & 0xf; 1323 family = (fms >> 8) & 0xf;
1320 model = (fms >> 4) & 0xf; 1324 model = (fms >> 4) & 0xf;
1321 stepping = fms & 0xf; 1325 stepping = fms & 0xf;
@@ -1340,7 +1344,7 @@ void parse_cpuid(void)
1340 errx(1, "CPUID: no MSR"); 1344 errx(1, "CPUID: no MSR");
1341 1345
1342 1346
1343 __get_cpuid(0x6, &eax, &ebx, &ecx, &edx); 1347 get_cpuid_or_exit(0x6, &eax, &ebx, &ecx, &edx);
1344 /* turbo_is_enabled already set */ 1348 /* turbo_is_enabled already set */
1345 /* has_hwp already set */ 1349 /* has_hwp already set */
1346 has_hwp_notify = eax & (1 << 8); 1350 has_hwp_notify = eax & (1 << 8);
diff --git a/tools/scripts/Makefile.include b/tools/scripts/Makefile.include
index 495066bafbe3..ded7a950dc40 100644
--- a/tools/scripts/Makefile.include
+++ b/tools/scripts/Makefile.include
@@ -32,7 +32,6 @@ EXTRA_WARNINGS += -Wno-system-headers
32EXTRA_WARNINGS += -Wold-style-definition 32EXTRA_WARNINGS += -Wold-style-definition
33EXTRA_WARNINGS += -Wpacked 33EXTRA_WARNINGS += -Wpacked
34EXTRA_WARNINGS += -Wredundant-decls 34EXTRA_WARNINGS += -Wredundant-decls
35EXTRA_WARNINGS += -Wshadow
36EXTRA_WARNINGS += -Wstrict-prototypes 35EXTRA_WARNINGS += -Wstrict-prototypes
37EXTRA_WARNINGS += -Wswitch-default 36EXTRA_WARNINGS += -Wswitch-default
38EXTRA_WARNINGS += -Wswitch-enum 37EXTRA_WARNINGS += -Wswitch-enum
@@ -69,8 +68,16 @@ endif
69# will do for now and keep the above -Wstrict-aliasing=3 in place 68# will do for now and keep the above -Wstrict-aliasing=3 in place
70# in newer systems. 69# in newer systems.
71# Needed for the __raw_cmpxchg in tools/arch/x86/include/asm/cmpxchg.h 70# Needed for the __raw_cmpxchg in tools/arch/x86/include/asm/cmpxchg.h
71#
72# See https://lkml.org/lkml/2006/11/28/253 and https://gcc.gnu.org/gcc-4.8/changes.html,
73# that takes into account Linus's comments (search for Wshadow) for the reasoning about
74# -Wshadow not being interesting before gcc 4.8.
75
72ifneq ($(filter 3.%,$(MAKE_VERSION)),) # make-3 76ifneq ($(filter 3.%,$(MAKE_VERSION)),) # make-3
73EXTRA_WARNINGS += -fno-strict-aliasing 77EXTRA_WARNINGS += -fno-strict-aliasing
78EXTRA_WARNINGS += -Wno-shadow
79else
80EXTRA_WARNINGS += -Wshadow
74endif 81endif
75 82
76ifneq ($(findstring $(MAKEFLAGS), w),w) 83ifneq ($(findstring $(MAKEFLAGS), w),w)
diff --git a/tools/testing/ktest/config-bisect.pl b/tools/testing/ktest/config-bisect.pl
index 72525426654b..6fd864935319 100755
--- a/tools/testing/ktest/config-bisect.pl
+++ b/tools/testing/ktest/config-bisect.pl
@@ -663,7 +663,7 @@ while ($#ARGV >= 0) {
663 } 663 }
664 664
665 else { 665 else {
666 die "Unknow option $opt\n"; 666 die "Unknown option $opt\n";
667 } 667 }
668} 668}
669 669
@@ -732,7 +732,7 @@ if ($start) {
732 } 732 }
733 } 733 }
734 run_command "cp $good_start $good" or die "failed to copy to $good\n"; 734 run_command "cp $good_start $good" or die "failed to copy to $good\n";
735 run_command "cp $bad_start $bad" or die "faield to copy to $bad\n"; 735 run_command "cp $bad_start $bad" or die "failed to copy to $bad\n";
736} else { 736} else {
737 if ( ! -f $good ) { 737 if ( ! -f $good ) {
738 die "Can not find file $good\n"; 738 die "Can not find file $good\n";
diff --git a/tools/testing/selftests/bpf/Makefile b/tools/testing/selftests/bpf/Makefile
index 11c9c62c3362..96752ebd938f 100644
--- a/tools/testing/selftests/bpf/Makefile
+++ b/tools/testing/selftests/bpf/Makefile
@@ -34,6 +34,9 @@ TEST_GEN_PROGS = test_verifier test_tag test_maps test_lru_map test_lpm_map test
34BPF_OBJ_FILES = $(patsubst %.c,%.o, $(notdir $(wildcard progs/*.c))) 34BPF_OBJ_FILES = $(patsubst %.c,%.o, $(notdir $(wildcard progs/*.c)))
35TEST_GEN_FILES = $(BPF_OBJ_FILES) 35TEST_GEN_FILES = $(BPF_OBJ_FILES)
36 36
37BTF_C_FILES = $(wildcard progs/btf_dump_test_case_*.c)
38TEST_FILES = $(BTF_C_FILES)
39
37# Also test sub-register code-gen if LLVM has eBPF v3 processor support which 40# Also test sub-register code-gen if LLVM has eBPF v3 processor support which
38# contains both ALU32 and JMP32 instructions. 41# contains both ALU32 and JMP32 instructions.
39SUBREG_CODEGEN := $(shell echo "int cal(int a) { return a > 0; }" | \ 42SUBREG_CODEGEN := $(shell echo "int cal(int a) { return a > 0; }" | \
@@ -57,7 +60,8 @@ TEST_PROGS := test_kmod.sh \
57 test_lirc_mode2.sh \ 60 test_lirc_mode2.sh \
58 test_skb_cgroup_id.sh \ 61 test_skb_cgroup_id.sh \
59 test_flow_dissector.sh \ 62 test_flow_dissector.sh \
60 test_xdp_vlan.sh \ 63 test_xdp_vlan_mode_generic.sh \
64 test_xdp_vlan_mode_native.sh \
61 test_lwt_ip_encap.sh \ 65 test_lwt_ip_encap.sh \
62 test_tcp_check_syncookie.sh \ 66 test_tcp_check_syncookie.sh \
63 test_tc_tunnel.sh \ 67 test_tc_tunnel.sh \
@@ -67,7 +71,8 @@ TEST_PROGS := test_kmod.sh \
67TEST_PROGS_EXTENDED := with_addr.sh \ 71TEST_PROGS_EXTENDED := with_addr.sh \
68 with_tunnels.sh \ 72 with_tunnels.sh \
69 tcp_client.py \ 73 tcp_client.py \
70 tcp_server.py 74 tcp_server.py \
75 test_xdp_vlan.sh
71 76
72# Compile but not part of 'make run_tests' 77# Compile but not part of 'make run_tests'
73TEST_GEN_PROGS_EXTENDED = test_libbpf_open test_sock_addr test_skb_cgroup_id_user \ 78TEST_GEN_PROGS_EXTENDED = test_libbpf_open test_sock_addr test_skb_cgroup_id_user \
diff --git a/tools/testing/selftests/bpf/config b/tools/testing/selftests/bpf/config
index f7a0744db31e..5dc109f4c097 100644
--- a/tools/testing/selftests/bpf/config
+++ b/tools/testing/selftests/bpf/config
@@ -34,3 +34,4 @@ CONFIG_NET_MPLS_GSO=m
34CONFIG_MPLS_ROUTING=m 34CONFIG_MPLS_ROUTING=m
35CONFIG_MPLS_IPTUNNEL=m 35CONFIG_MPLS_IPTUNNEL=m
36CONFIG_IPV6_SIT=m 36CONFIG_IPV6_SIT=m
37CONFIG_BPF_JIT=y
diff --git a/tools/testing/selftests/bpf/progs/sendmsg6_prog.c b/tools/testing/selftests/bpf/progs/sendmsg6_prog.c
index 5aeaa284fc47..a68062820410 100644
--- a/tools/testing/selftests/bpf/progs/sendmsg6_prog.c
+++ b/tools/testing/selftests/bpf/progs/sendmsg6_prog.c
@@ -41,8 +41,7 @@ int sendmsg_v6_prog(struct bpf_sock_addr *ctx)
41 } 41 }
42 42
43 /* Rewrite destination. */ 43 /* Rewrite destination. */
44 if ((ctx->user_ip6[0] & 0xFFFF) == bpf_htons(0xFACE) && 44 if (ctx->user_ip6[0] == bpf_htonl(0xFACEB00C)) {
45 ctx->user_ip6[0] >> 16 == bpf_htons(0xB00C)) {
46 ctx->user_ip6[0] = bpf_htonl(DST_REWRITE_IP6_0); 45 ctx->user_ip6[0] = bpf_htonl(DST_REWRITE_IP6_0);
47 ctx->user_ip6[1] = bpf_htonl(DST_REWRITE_IP6_1); 46 ctx->user_ip6[1] = bpf_htonl(DST_REWRITE_IP6_1);
48 ctx->user_ip6[2] = bpf_htonl(DST_REWRITE_IP6_2); 47 ctx->user_ip6[2] = bpf_htonl(DST_REWRITE_IP6_2);
diff --git a/tools/testing/selftests/bpf/test_btf_dump.c b/tools/testing/selftests/bpf/test_btf_dump.c
index 8f850823d35f..6e75dd3cb14f 100644
--- a/tools/testing/selftests/bpf/test_btf_dump.c
+++ b/tools/testing/selftests/bpf/test_btf_dump.c
@@ -97,6 +97,13 @@ int test_btf_dump_case(int n, struct btf_dump_test_case *test_case)
97 } 97 }
98 98
99 snprintf(test_file, sizeof(test_file), "progs/%s.c", test_case->name); 99 snprintf(test_file, sizeof(test_file), "progs/%s.c", test_case->name);
100 if (access(test_file, R_OK) == -1)
101 /*
102 * When the test is run with O=, kselftest copies TEST_FILES
103 * without preserving the directory structure.
104 */
105 snprintf(test_file, sizeof(test_file), "%s.c",
106 test_case->name);
100 /* 107 /*
101 * Diff test output and expected test output, contained between 108 * Diff test output and expected test output, contained between
102 * START-EXPECTED-OUTPUT and END-EXPECTED-OUTPUT lines in test case. 109 * START-EXPECTED-OUTPUT and END-EXPECTED-OUTPUT lines in test case.
diff --git a/tools/testing/selftests/bpf/test_cgroup_storage.c b/tools/testing/selftests/bpf/test_cgroup_storage.c
index 2fc4625c1a15..655729004391 100644
--- a/tools/testing/selftests/bpf/test_cgroup_storage.c
+++ b/tools/testing/selftests/bpf/test_cgroup_storage.c
@@ -20,9 +20,9 @@ int main(int argc, char **argv)
20 BPF_MOV64_IMM(BPF_REG_2, 0), /* flags, not used */ 20 BPF_MOV64_IMM(BPF_REG_2, 0), /* flags, not used */
21 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, 21 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
22 BPF_FUNC_get_local_storage), 22 BPF_FUNC_get_local_storage),
23 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0), 23 BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_0, 0),
24 BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, 0x1), 24 BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, 0x1),
25 BPF_STX_MEM(BPF_W, BPF_REG_0, BPF_REG_3, 0), 25 BPF_STX_MEM(BPF_DW, BPF_REG_0, BPF_REG_3, 0),
26 26
27 BPF_LD_MAP_FD(BPF_REG_1, 0), /* map fd */ 27 BPF_LD_MAP_FD(BPF_REG_1, 0), /* map fd */
28 BPF_MOV64_IMM(BPF_REG_2, 0), /* flags, not used */ 28 BPF_MOV64_IMM(BPF_REG_2, 0), /* flags, not used */
@@ -30,7 +30,7 @@ int main(int argc, char **argv)
30 BPF_FUNC_get_local_storage), 30 BPF_FUNC_get_local_storage),
31 BPF_MOV64_IMM(BPF_REG_1, 1), 31 BPF_MOV64_IMM(BPF_REG_1, 1),
32 BPF_STX_XADD(BPF_DW, BPF_REG_0, BPF_REG_1, 0), 32 BPF_STX_XADD(BPF_DW, BPF_REG_0, BPF_REG_1, 0),
33 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0), 33 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0),
34 BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 0x1), 34 BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 0x1),
35 BPF_MOV64_REG(BPF_REG_0, BPF_REG_1), 35 BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
36 BPF_EXIT_INSN(), 36 BPF_EXIT_INSN(),
diff --git a/tools/testing/selftests/bpf/test_sock.c b/tools/testing/selftests/bpf/test_sock.c
index fb679ac3d4b0..0e6652733462 100644
--- a/tools/testing/selftests/bpf/test_sock.c
+++ b/tools/testing/selftests/bpf/test_sock.c
@@ -13,6 +13,7 @@
13#include <bpf/bpf.h> 13#include <bpf/bpf.h>
14 14
15#include "cgroup_helpers.h" 15#include "cgroup_helpers.h"
16#include "bpf_endian.h"
16#include "bpf_rlimit.h" 17#include "bpf_rlimit.h"
17#include "bpf_util.h" 18#include "bpf_util.h"
18 19
@@ -232,7 +233,8 @@ static struct sock_test tests[] = {
232 /* if (ip == expected && port == expected) */ 233 /* if (ip == expected && port == expected) */
233 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_6, 234 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_6,
234 offsetof(struct bpf_sock, src_ip6[3])), 235 offsetof(struct bpf_sock, src_ip6[3])),
235 BPF_JMP_IMM(BPF_JNE, BPF_REG_7, 0x01000000, 4), 236 BPF_JMP_IMM(BPF_JNE, BPF_REG_7,
237 __bpf_constant_ntohl(0x00000001), 4),
236 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_6, 238 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_6,
237 offsetof(struct bpf_sock, src_port)), 239 offsetof(struct bpf_sock, src_port)),
238 BPF_JMP_IMM(BPF_JNE, BPF_REG_7, 0x2001, 2), 240 BPF_JMP_IMM(BPF_JNE, BPF_REG_7, 0x2001, 2),
@@ -261,7 +263,8 @@ static struct sock_test tests[] = {
261 /* if (ip == expected && port == expected) */ 263 /* if (ip == expected && port == expected) */
262 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_6, 264 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_6,
263 offsetof(struct bpf_sock, src_ip4)), 265 offsetof(struct bpf_sock, src_ip4)),
264 BPF_JMP_IMM(BPF_JNE, BPF_REG_7, 0x0100007F, 4), 266 BPF_JMP_IMM(BPF_JNE, BPF_REG_7,
267 __bpf_constant_ntohl(0x7F000001), 4),
265 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_6, 268 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_6,
266 offsetof(struct bpf_sock, src_port)), 269 offsetof(struct bpf_sock, src_port)),
267 BPF_JMP_IMM(BPF_JNE, BPF_REG_7, 0x1002, 2), 270 BPF_JMP_IMM(BPF_JNE, BPF_REG_7, 0x1002, 2),
diff --git a/tools/testing/selftests/bpf/test_xdp_vlan.sh b/tools/testing/selftests/bpf/test_xdp_vlan.sh
index 51a3a31d1aac..bb8b0da91686 100755
--- a/tools/testing/selftests/bpf/test_xdp_vlan.sh
+++ b/tools/testing/selftests/bpf/test_xdp_vlan.sh
@@ -1,6 +1,14 @@
1#!/bin/bash 1#!/bin/bash
2# SPDX-License-Identifier: GPL-2.0
3# Author: Jesper Dangaard Brouer <hawk@kernel.org>
2 4
3TESTNAME=xdp_vlan 5# Allow wrapper scripts to name test
6if [ -z "$TESTNAME" ]; then
7 TESTNAME=xdp_vlan
8fi
9
10# Default XDP mode
11XDP_MODE=xdpgeneric
4 12
5usage() { 13usage() {
6 echo "Testing XDP + TC eBPF VLAN manipulations: $TESTNAME" 14 echo "Testing XDP + TC eBPF VLAN manipulations: $TESTNAME"
@@ -9,9 +17,23 @@ usage() {
9 echo " -v | --verbose : Verbose" 17 echo " -v | --verbose : Verbose"
10 echo " --flush : Flush before starting (e.g. after --interactive)" 18 echo " --flush : Flush before starting (e.g. after --interactive)"
11 echo " --interactive : Keep netns setup running after test-run" 19 echo " --interactive : Keep netns setup running after test-run"
20 echo " --mode=XXX : Choose XDP mode (xdp | xdpgeneric | xdpdrv)"
12 echo "" 21 echo ""
13} 22}
14 23
24valid_xdp_mode()
25{
26 local mode=$1
27
28 case "$mode" in
29 xdpgeneric | xdpdrv | xdp)
30 return 0
31 ;;
32 *)
33 return 1
34 esac
35}
36
15cleanup() 37cleanup()
16{ 38{
17 local status=$? 39 local status=$?
@@ -37,7 +59,7 @@ cleanup()
37 59
38# Using external program "getopt" to get --long-options 60# Using external program "getopt" to get --long-options
39OPTIONS=$(getopt -o hvfi: \ 61OPTIONS=$(getopt -o hvfi: \
40 --long verbose,flush,help,interactive,debug -- "$@") 62 --long verbose,flush,help,interactive,debug,mode: -- "$@")
41if (( $? != 0 )); then 63if (( $? != 0 )); then
42 usage 64 usage
43 echo "selftests: $TESTNAME [FAILED] Error calling getopt, unknown option?" 65 echo "selftests: $TESTNAME [FAILED] Error calling getopt, unknown option?"
@@ -60,6 +82,11 @@ while true; do
60 cleanup 82 cleanup
61 shift 83 shift
62 ;; 84 ;;
85 --mode )
86 shift
87 XDP_MODE=$1
88 shift
89 ;;
63 -- ) 90 -- )
64 shift 91 shift
65 break 92 break
@@ -81,8 +108,14 @@ if [ "$EUID" -ne 0 ]; then
81 exit 1 108 exit 1
82fi 109fi
83 110
84ip link set dev lo xdp off 2>/dev/null > /dev/null 111valid_xdp_mode $XDP_MODE
85if [ $? -ne 0 ];then 112if [ $? -ne 0 ]; then
113 echo "selftests: $TESTNAME [FAILED] unknown XDP mode ($XDP_MODE)"
114 exit 1
115fi
116
117ip link set dev lo xdpgeneric off 2>/dev/null > /dev/null
118if [ $? -ne 0 ]; then
86 echo "selftests: $TESTNAME [SKIP] need ip xdp support" 119 echo "selftests: $TESTNAME [SKIP] need ip xdp support"
87 exit 0 120 exit 0
88fi 121fi
@@ -155,7 +188,7 @@ ip netns exec ns2 ip link set lo up
155# At this point, the hosts cannot reach each-other, 188# At this point, the hosts cannot reach each-other,
156# because ns2 are using VLAN tags on the packets. 189# because ns2 are using VLAN tags on the packets.
157 190
158ip netns exec ns2 sh -c 'ping -W 1 -c 1 100.64.41.1 || echo "Okay ping fails"' 191ip netns exec ns2 sh -c 'ping -W 1 -c 1 100.64.41.1 || echo "Success: First ping must fail"'
159 192
160 193
161# Now we can use the test_xdp_vlan.c program to pop/push these VLAN tags 194# Now we can use the test_xdp_vlan.c program to pop/push these VLAN tags
@@ -166,7 +199,7 @@ export FILE=test_xdp_vlan.o
166 199
167# First test: Remove VLAN by setting VLAN ID 0, using "xdp_vlan_change" 200# First test: Remove VLAN by setting VLAN ID 0, using "xdp_vlan_change"
168export XDP_PROG=xdp_vlan_change 201export XDP_PROG=xdp_vlan_change
169ip netns exec ns1 ip link set $DEVNS1 xdp object $FILE section $XDP_PROG 202ip netns exec ns1 ip link set $DEVNS1 $XDP_MODE object $FILE section $XDP_PROG
170 203
171# In ns1: egress use TC to add back VLAN tag 4011 204# In ns1: egress use TC to add back VLAN tag 4011
172# (del cmd) 205# (del cmd)
@@ -177,8 +210,8 @@ ip netns exec ns1 tc filter add dev $DEVNS1 egress \
177 prio 1 handle 1 bpf da obj $FILE sec tc_vlan_push 210 prio 1 handle 1 bpf da obj $FILE sec tc_vlan_push
178 211
179# Now the namespaces can reach each-other, test with ping: 212# Now the namespaces can reach each-other, test with ping:
180ip netns exec ns2 ping -W 2 -c 3 $IPADDR1 213ip netns exec ns2 ping -i 0.2 -W 2 -c 2 $IPADDR1
181ip netns exec ns1 ping -W 2 -c 3 $IPADDR2 214ip netns exec ns1 ping -i 0.2 -W 2 -c 2 $IPADDR2
182 215
183# Second test: Replace xdp prog, that fully remove vlan header 216# Second test: Replace xdp prog, that fully remove vlan header
184# 217#
@@ -187,9 +220,9 @@ ip netns exec ns1 ping -W 2 -c 3 $IPADDR2
187# ETH_P_8021Q indication, and this cause overwriting of our changes. 220# ETH_P_8021Q indication, and this cause overwriting of our changes.
188# 221#
189export XDP_PROG=xdp_vlan_remove_outer2 222export XDP_PROG=xdp_vlan_remove_outer2
190ip netns exec ns1 ip link set $DEVNS1 xdp off 223ip netns exec ns1 ip link set $DEVNS1 $XDP_MODE off
191ip netns exec ns1 ip link set $DEVNS1 xdp object $FILE section $XDP_PROG 224ip netns exec ns1 ip link set $DEVNS1 $XDP_MODE object $FILE section $XDP_PROG
192 225
193# Now the namespaces should still be able reach each-other, test with ping: 226# Now the namespaces should still be able reach each-other, test with ping:
194ip netns exec ns2 ping -W 2 -c 3 $IPADDR1 227ip netns exec ns2 ping -i 0.2 -W 2 -c 2 $IPADDR1
195ip netns exec ns1 ping -W 2 -c 3 $IPADDR2 228ip netns exec ns1 ping -i 0.2 -W 2 -c 2 $IPADDR2
diff --git a/tools/testing/selftests/bpf/test_xdp_vlan_mode_generic.sh b/tools/testing/selftests/bpf/test_xdp_vlan_mode_generic.sh
new file mode 100755
index 000000000000..c515326d6d59
--- /dev/null
+++ b/tools/testing/selftests/bpf/test_xdp_vlan_mode_generic.sh
@@ -0,0 +1,9 @@
1#!/bin/bash
2# SPDX-License-Identifier: GPL-2.0
3
4# Exit on failure
5set -e
6
7# Wrapper script to test generic-XDP
8export TESTNAME=xdp_vlan_mode_generic
9./test_xdp_vlan.sh --mode=xdpgeneric
diff --git a/tools/testing/selftests/bpf/test_xdp_vlan_mode_native.sh b/tools/testing/selftests/bpf/test_xdp_vlan_mode_native.sh
new file mode 100755
index 000000000000..5cf7ce1f16c1
--- /dev/null
+++ b/tools/testing/selftests/bpf/test_xdp_vlan_mode_native.sh
@@ -0,0 +1,9 @@
1#!/bin/bash
2# SPDX-License-Identifier: GPL-2.0
3
4# Exit on failure
5set -e
6
7# Wrapper script to test native-XDP
8export TESTNAME=xdp_vlan_mode_native
9./test_xdp_vlan.sh --mode=xdpdrv
diff --git a/tools/testing/selftests/bpf/verifier/ctx_skb.c b/tools/testing/selftests/bpf/verifier/ctx_skb.c
index b0fda2877119..d438193804b2 100644
--- a/tools/testing/selftests/bpf/verifier/ctx_skb.c
+++ b/tools/testing/selftests/bpf/verifier/ctx_skb.c
@@ -975,6 +975,17 @@
975 .prog_type = BPF_PROG_TYPE_CGROUP_SKB, 975 .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
976}, 976},
977{ 977{
978 "read gso_segs from CGROUP_SKB",
979 .insns = {
980 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
981 offsetof(struct __sk_buff, gso_segs)),
982 BPF_MOV64_IMM(BPF_REG_0, 0),
983 BPF_EXIT_INSN(),
984 },
985 .result = ACCEPT,
986 .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
987},
988{
978 "write gso_segs from CGROUP_SKB", 989 "write gso_segs from CGROUP_SKB",
979 .insns = { 990 .insns = {
980 BPF_MOV64_IMM(BPF_REG_0, 0), 991 BPF_MOV64_IMM(BPF_REG_0, 0),
diff --git a/tools/testing/selftests/bpf/verifier/loops1.c b/tools/testing/selftests/bpf/verifier/loops1.c
index 5e980a5ab69d..1fc4e61e9f9f 100644
--- a/tools/testing/selftests/bpf/verifier/loops1.c
+++ b/tools/testing/selftests/bpf/verifier/loops1.c
@@ -159,3 +159,31 @@
159 .errstr = "loop detected", 159 .errstr = "loop detected",
160 .prog_type = BPF_PROG_TYPE_TRACEPOINT, 160 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
161}, 161},
162{
163 "not-taken loop with back jump to 1st insn",
164 .insns = {
165 BPF_MOV64_IMM(BPF_REG_0, 123),
166 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 4, -2),
167 BPF_EXIT_INSN(),
168 },
169 .result = ACCEPT,
170 .prog_type = BPF_PROG_TYPE_XDP,
171 .retval = 123,
172},
173{
174 "taken loop with back jump to 1st insn",
175 .insns = {
176 BPF_MOV64_IMM(BPF_REG_1, 10),
177 BPF_MOV64_IMM(BPF_REG_2, 0),
178 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
179 BPF_EXIT_INSN(),
180 BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_1),
181 BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 1),
182 BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, -3),
183 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
184 BPF_EXIT_INSN(),
185 },
186 .result = ACCEPT,
187 .prog_type = BPF_PROG_TYPE_XDP,
188 .retval = 55,
189},
diff --git a/tools/testing/selftests/cgroup/cgroup_util.c b/tools/testing/selftests/cgroup/cgroup_util.c
index 4c223266299a..bdb69599c4bd 100644
--- a/tools/testing/selftests/cgroup/cgroup_util.c
+++ b/tools/testing/selftests/cgroup/cgroup_util.c
@@ -191,8 +191,7 @@ int cg_find_unified_root(char *root, size_t len)
191 strtok(NULL, delim); 191 strtok(NULL, delim);
192 strtok(NULL, delim); 192 strtok(NULL, delim);
193 193
194 if (strcmp(fs, "cgroup") == 0 && 194 if (strcmp(type, "cgroup2") == 0) {
195 strcmp(type, "cgroup2") == 0) {
196 strncpy(root, mount, len); 195 strncpy(root, mount, len);
197 return 0; 196 return 0;
198 } 197 }
diff --git a/tools/testing/selftests/drivers/net/mlxsw/qos_mc_aware.sh b/tools/testing/selftests/drivers/net/mlxsw/qos_mc_aware.sh
index 71231ad2dbfb..47315fe48d5a 100755
--- a/tools/testing/selftests/drivers/net/mlxsw/qos_mc_aware.sh
+++ b/tools/testing/selftests/drivers/net/mlxsw/qos_mc_aware.sh
@@ -262,7 +262,7 @@ test_mc_aware()
262 262
263 stop_traffic 263 stop_traffic
264 264
265 log_test "UC performace under MC overload" 265 log_test "UC performance under MC overload"
266 266
267 echo "UC-only throughput $(humanize $ucth1)" 267 echo "UC-only throughput $(humanize $ucth1)"
268 echo "UC+MC throughput $(humanize $ucth2)" 268 echo "UC+MC throughput $(humanize $ucth2)"
@@ -316,7 +316,7 @@ test_uc_aware()
316 316
317 stop_traffic 317 stop_traffic
318 318
319 log_test "MC performace under UC overload" 319 log_test "MC performance under UC overload"
320 echo " ingress UC throughput $(humanize ${uc_ir})" 320 echo " ingress UC throughput $(humanize ${uc_ir})"
321 echo " egress UC throughput $(humanize ${uc_er})" 321 echo " egress UC throughput $(humanize ${uc_er})"
322 echo " sent $attempts BC ARPs, got $passes responses" 322 echo " sent $attempts BC ARPs, got $passes responses"
diff --git a/tools/testing/selftests/kmod/kmod.sh b/tools/testing/selftests/kmod/kmod.sh
index 0a76314b4414..8b944cf042f6 100755
--- a/tools/testing/selftests/kmod/kmod.sh
+++ b/tools/testing/selftests/kmod/kmod.sh
@@ -28,7 +28,7 @@
28# override by exporting to your environment prior running this script. 28# override by exporting to your environment prior running this script.
29# For instance this script assumes you do not have xfs loaded upon boot. 29# For instance this script assumes you do not have xfs loaded upon boot.
30# If this is false, export DEFAULT_KMOD_FS="ext4" prior to running this 30# If this is false, export DEFAULT_KMOD_FS="ext4" prior to running this
31# script if the filesyste module you don't have loaded upon bootup 31# script if the filesystem module you don't have loaded upon bootup
32# is ext4 instead. Refer to allow_user_defaults() for a list of user 32# is ext4 instead. Refer to allow_user_defaults() for a list of user
33# override variables possible. 33# override variables possible.
34# 34#
@@ -263,7 +263,7 @@ config_get_test_result()
263config_reset() 263config_reset()
264{ 264{
265 if ! echo -n "1" >"$DIR"/reset; then 265 if ! echo -n "1" >"$DIR"/reset; then
266 echo "$0: reset shuld have worked" >&2 266 echo "$0: reset should have worked" >&2
267 exit 1 267 exit 1
268 fi 268 fi
269} 269}
@@ -488,7 +488,7 @@ usage()
488 echo Example uses: 488 echo Example uses:
489 echo 489 echo
490 echo "${TEST_NAME}.sh -- executes all tests" 490 echo "${TEST_NAME}.sh -- executes all tests"
491 echo "${TEST_NAME}.sh -t 0008 -- Executes test ID 0008 number of times is recomended" 491 echo "${TEST_NAME}.sh -t 0008 -- Executes test ID 0008 number of times is recommended"
492 echo "${TEST_NAME}.sh -w 0008 -- Watch test ID 0008 run until an error occurs" 492 echo "${TEST_NAME}.sh -w 0008 -- Watch test ID 0008 run until an error occurs"
493 echo "${TEST_NAME}.sh -s 0008 -- Run test ID 0008 once" 493 echo "${TEST_NAME}.sh -s 0008 -- Run test ID 0008 once"
494 echo "${TEST_NAME}.sh -c 0008 3 -- Run test ID 0008 three times" 494 echo "${TEST_NAME}.sh -c 0008 3 -- Run test ID 0008 three times"
diff --git a/tools/testing/selftests/kselftest.h b/tools/testing/selftests/kselftest.h
index ec15c4f6af55..0ac49d91a260 100644
--- a/tools/testing/selftests/kselftest.h
+++ b/tools/testing/selftests/kselftest.h
@@ -10,6 +10,7 @@
10#ifndef __KSELFTEST_H 10#ifndef __KSELFTEST_H
11#define __KSELFTEST_H 11#define __KSELFTEST_H
12 12
13#include <errno.h>
13#include <stdlib.h> 14#include <stdlib.h>
14#include <unistd.h> 15#include <unistd.h>
15#include <stdarg.h> 16#include <stdarg.h>
@@ -81,58 +82,68 @@ static inline void ksft_print_cnts(void)
81 82
82static inline void ksft_print_msg(const char *msg, ...) 83static inline void ksft_print_msg(const char *msg, ...)
83{ 84{
85 int saved_errno = errno;
84 va_list args; 86 va_list args;
85 87
86 va_start(args, msg); 88 va_start(args, msg);
87 printf("# "); 89 printf("# ");
90 errno = saved_errno;
88 vprintf(msg, args); 91 vprintf(msg, args);
89 va_end(args); 92 va_end(args);
90} 93}
91 94
92static inline void ksft_test_result_pass(const char *msg, ...) 95static inline void ksft_test_result_pass(const char *msg, ...)
93{ 96{
97 int saved_errno = errno;
94 va_list args; 98 va_list args;
95 99
96 ksft_cnt.ksft_pass++; 100 ksft_cnt.ksft_pass++;
97 101
98 va_start(args, msg); 102 va_start(args, msg);
99 printf("ok %d ", ksft_test_num()); 103 printf("ok %d ", ksft_test_num());
104 errno = saved_errno;
100 vprintf(msg, args); 105 vprintf(msg, args);
101 va_end(args); 106 va_end(args);
102} 107}
103 108
104static inline void ksft_test_result_fail(const char *msg, ...) 109static inline void ksft_test_result_fail(const char *msg, ...)
105{ 110{
111 int saved_errno = errno;
106 va_list args; 112 va_list args;
107 113
108 ksft_cnt.ksft_fail++; 114 ksft_cnt.ksft_fail++;
109 115
110 va_start(args, msg); 116 va_start(args, msg);
111 printf("not ok %d ", ksft_test_num()); 117 printf("not ok %d ", ksft_test_num());
118 errno = saved_errno;
112 vprintf(msg, args); 119 vprintf(msg, args);
113 va_end(args); 120 va_end(args);
114} 121}
115 122
116static inline void ksft_test_result_skip(const char *msg, ...) 123static inline void ksft_test_result_skip(const char *msg, ...)
117{ 124{
125 int saved_errno = errno;
118 va_list args; 126 va_list args;
119 127
120 ksft_cnt.ksft_xskip++; 128 ksft_cnt.ksft_xskip++;
121 129
122 va_start(args, msg); 130 va_start(args, msg);
123 printf("not ok %d # SKIP ", ksft_test_num()); 131 printf("not ok %d # SKIP ", ksft_test_num());
132 errno = saved_errno;
124 vprintf(msg, args); 133 vprintf(msg, args);
125 va_end(args); 134 va_end(args);
126} 135}
127 136
128static inline void ksft_test_result_error(const char *msg, ...) 137static inline void ksft_test_result_error(const char *msg, ...)
129{ 138{
139 int saved_errno = errno;
130 va_list args; 140 va_list args;
131 141
132 ksft_cnt.ksft_error++; 142 ksft_cnt.ksft_error++;
133 143
134 va_start(args, msg); 144 va_start(args, msg);
135 printf("not ok %d # error ", ksft_test_num()); 145 printf("not ok %d # error ", ksft_test_num());
146 errno = saved_errno;
136 vprintf(msg, args); 147 vprintf(msg, args);
137 va_end(args); 148 va_end(args);
138} 149}
@@ -152,10 +163,12 @@ static inline int ksft_exit_fail(void)
152 163
153static inline int ksft_exit_fail_msg(const char *msg, ...) 164static inline int ksft_exit_fail_msg(const char *msg, ...)
154{ 165{
166 int saved_errno = errno;
155 va_list args; 167 va_list args;
156 168
157 va_start(args, msg); 169 va_start(args, msg);
158 printf("Bail out! "); 170 printf("Bail out! ");
171 errno = saved_errno;
159 vprintf(msg, args); 172 vprintf(msg, args);
160 va_end(args); 173 va_end(args);
161 174
@@ -178,10 +191,12 @@ static inline int ksft_exit_xpass(void)
178static inline int ksft_exit_skip(const char *msg, ...) 191static inline int ksft_exit_skip(const char *msg, ...)
179{ 192{
180 if (msg) { 193 if (msg) {
194 int saved_errno = errno;
181 va_list args; 195 va_list args;
182 196
183 va_start(args, msg); 197 va_start(args, msg);
184 printf("not ok %d # SKIP ", 1 + ksft_test_num()); 198 printf("not ok %d # SKIP ", 1 + ksft_test_num());
199 errno = saved_errno;
185 vprintf(msg, args); 200 vprintf(msg, args);
186 va_end(args); 201 va_end(args);
187 } else { 202 } else {
diff --git a/tools/testing/selftests/kvm/.gitignore b/tools/testing/selftests/kvm/.gitignore
index 41266af0d3dc..b35da375530a 100644
--- a/tools/testing/selftests/kvm/.gitignore
+++ b/tools/testing/selftests/kvm/.gitignore
@@ -1,7 +1,7 @@
1/s390x/sync_regs_test
1/x86_64/cr4_cpuid_sync_test 2/x86_64/cr4_cpuid_sync_test
2/x86_64/evmcs_test 3/x86_64/evmcs_test
3/x86_64/hyperv_cpuid 4/x86_64/hyperv_cpuid
4/x86_64/kvm_create_max_vcpus
5/x86_64/mmio_warning_test 5/x86_64/mmio_warning_test
6/x86_64/platform_info_test 6/x86_64/platform_info_test
7/x86_64/set_sregs_test 7/x86_64/set_sregs_test
@@ -13,3 +13,4 @@
13/x86_64/vmx_tsc_adjust_test 13/x86_64/vmx_tsc_adjust_test
14/clear_dirty_log_test 14/clear_dirty_log_test
15/dirty_log_test 15/dirty_log_test
16/kvm_create_max_vcpus
diff --git a/tools/testing/selftests/kvm/config b/tools/testing/selftests/kvm/config
new file mode 100644
index 000000000000..63ed533f73d6
--- /dev/null
+++ b/tools/testing/selftests/kvm/config
@@ -0,0 +1,3 @@
1CONFIG_KVM=y
2CONFIG_KVM_INTEL=y
3CONFIG_KVM_AMD=y
diff --git a/tools/testing/selftests/kvm/include/evmcs.h b/tools/testing/selftests/kvm/include/evmcs.h
index 4059014d93ea..4912d23844bc 100644
--- a/tools/testing/selftests/kvm/include/evmcs.h
+++ b/tools/testing/selftests/kvm/include/evmcs.h
@@ -220,6 +220,8 @@ struct hv_enlightened_vmcs {
220struct hv_enlightened_vmcs *current_evmcs; 220struct hv_enlightened_vmcs *current_evmcs;
221struct hv_vp_assist_page *current_vp_assist; 221struct hv_vp_assist_page *current_vp_assist;
222 222
223int vcpu_enable_evmcs(struct kvm_vm *vm, int vcpu_id);
224
223static inline int enable_vp_assist(uint64_t vp_assist_pa, void *vp_assist) 225static inline int enable_vp_assist(uint64_t vp_assist_pa, void *vp_assist)
224{ 226{
225 u64 val = (vp_assist_pa & HV_X64_MSR_VP_ASSIST_PAGE_ADDRESS_MASK) | 227 u64 val = (vp_assist_pa & HV_X64_MSR_VP_ASSIST_PAGE_ADDRESS_MASK) |
diff --git a/tools/testing/selftests/kvm/lib/x86_64/processor.c b/tools/testing/selftests/kvm/lib/x86_64/processor.c
index 6cb34a0fa200..0a5e487dbc50 100644
--- a/tools/testing/selftests/kvm/lib/x86_64/processor.c
+++ b/tools/testing/selftests/kvm/lib/x86_64/processor.c
@@ -1060,9 +1060,11 @@ struct kvm_x86_state *vcpu_save_state(struct kvm_vm *vm, uint32_t vcpuid)
1060 TEST_ASSERT(r == 0, "Unexpected result from KVM_GET_XSAVE, r: %i", 1060 TEST_ASSERT(r == 0, "Unexpected result from KVM_GET_XSAVE, r: %i",
1061 r); 1061 r);
1062 1062
1063 r = ioctl(vcpu->fd, KVM_GET_XCRS, &state->xcrs); 1063 if (kvm_check_cap(KVM_CAP_XCRS)) {
1064 TEST_ASSERT(r == 0, "Unexpected result from KVM_GET_XCRS, r: %i", 1064 r = ioctl(vcpu->fd, KVM_GET_XCRS, &state->xcrs);
1065 r); 1065 TEST_ASSERT(r == 0, "Unexpected result from KVM_GET_XCRS, r: %i",
1066 r);
1067 }
1066 1068
1067 r = ioctl(vcpu->fd, KVM_GET_SREGS, &state->sregs); 1069 r = ioctl(vcpu->fd, KVM_GET_SREGS, &state->sregs);
1068 TEST_ASSERT(r == 0, "Unexpected result from KVM_GET_SREGS, r: %i", 1070 TEST_ASSERT(r == 0, "Unexpected result from KVM_GET_SREGS, r: %i",
@@ -1103,9 +1105,11 @@ void vcpu_load_state(struct kvm_vm *vm, uint32_t vcpuid, struct kvm_x86_state *s
1103 TEST_ASSERT(r == 0, "Unexpected result from KVM_SET_XSAVE, r: %i", 1105 TEST_ASSERT(r == 0, "Unexpected result from KVM_SET_XSAVE, r: %i",
1104 r); 1106 r);
1105 1107
1106 r = ioctl(vcpu->fd, KVM_SET_XCRS, &state->xcrs); 1108 if (kvm_check_cap(KVM_CAP_XCRS)) {
1107 TEST_ASSERT(r == 0, "Unexpected result from KVM_SET_XCRS, r: %i", 1109 r = ioctl(vcpu->fd, KVM_SET_XCRS, &state->xcrs);
1108 r); 1110 TEST_ASSERT(r == 0, "Unexpected result from KVM_SET_XCRS, r: %i",
1111 r);
1112 }
1109 1113
1110 r = ioctl(vcpu->fd, KVM_SET_SREGS, &state->sregs); 1114 r = ioctl(vcpu->fd, KVM_SET_SREGS, &state->sregs);
1111 TEST_ASSERT(r == 0, "Unexpected result from KVM_SET_SREGS, r: %i", 1115 TEST_ASSERT(r == 0, "Unexpected result from KVM_SET_SREGS, r: %i",
diff --git a/tools/testing/selftests/kvm/lib/x86_64/vmx.c b/tools/testing/selftests/kvm/lib/x86_64/vmx.c
index 204f847bd065..9cef0455b819 100644
--- a/tools/testing/selftests/kvm/lib/x86_64/vmx.c
+++ b/tools/testing/selftests/kvm/lib/x86_64/vmx.c
@@ -12,6 +12,26 @@
12 12
13bool enable_evmcs; 13bool enable_evmcs;
14 14
15int vcpu_enable_evmcs(struct kvm_vm *vm, int vcpu_id)
16{
17 uint16_t evmcs_ver;
18
19 struct kvm_enable_cap enable_evmcs_cap = {
20 .cap = KVM_CAP_HYPERV_ENLIGHTENED_VMCS,
21 .args[0] = (unsigned long)&evmcs_ver
22 };
23
24 vcpu_ioctl(vm, vcpu_id, KVM_ENABLE_CAP, &enable_evmcs_cap);
25
26 /* KVM should return supported EVMCS version range */
27 TEST_ASSERT(((evmcs_ver >> 8) >= (evmcs_ver & 0xff)) &&
28 (evmcs_ver & 0xff) > 0,
29 "Incorrect EVMCS version range: %x:%x\n",
30 evmcs_ver & 0xff, evmcs_ver >> 8);
31
32 return evmcs_ver;
33}
34
15/* Allocate memory regions for nested VMX tests. 35/* Allocate memory regions for nested VMX tests.
16 * 36 *
17 * Input Args: 37 * Input Args:
diff --git a/tools/testing/selftests/kvm/x86_64/evmcs_test.c b/tools/testing/selftests/kvm/x86_64/evmcs_test.c
index f95c08343b48..92915e6408e7 100644
--- a/tools/testing/selftests/kvm/x86_64/evmcs_test.c
+++ b/tools/testing/selftests/kvm/x86_64/evmcs_test.c
@@ -79,11 +79,6 @@ int main(int argc, char *argv[])
79 struct kvm_x86_state *state; 79 struct kvm_x86_state *state;
80 struct ucall uc; 80 struct ucall uc;
81 int stage; 81 int stage;
82 uint16_t evmcs_ver;
83 struct kvm_enable_cap enable_evmcs_cap = {
84 .cap = KVM_CAP_HYPERV_ENLIGHTENED_VMCS,
85 .args[0] = (unsigned long)&evmcs_ver
86 };
87 82
88 /* Create VM */ 83 /* Create VM */
89 vm = vm_create_default(VCPU_ID, 0, guest_code); 84 vm = vm_create_default(VCPU_ID, 0, guest_code);
@@ -96,13 +91,7 @@ int main(int argc, char *argv[])
96 exit(KSFT_SKIP); 91 exit(KSFT_SKIP);
97 } 92 }
98 93
99 vcpu_ioctl(vm, VCPU_ID, KVM_ENABLE_CAP, &enable_evmcs_cap); 94 vcpu_enable_evmcs(vm, VCPU_ID);
100
101 /* KVM should return supported EVMCS version range */
102 TEST_ASSERT(((evmcs_ver >> 8) >= (evmcs_ver & 0xff)) &&
103 (evmcs_ver & 0xff) > 0,
104 "Incorrect EVMCS version range: %x:%x\n",
105 evmcs_ver & 0xff, evmcs_ver >> 8);
106 95
107 run = vcpu_state(vm, VCPU_ID); 96 run = vcpu_state(vm, VCPU_ID);
108 97
@@ -146,7 +135,7 @@ int main(int argc, char *argv[])
146 kvm_vm_restart(vm, O_RDWR); 135 kvm_vm_restart(vm, O_RDWR);
147 vm_vcpu_add(vm, VCPU_ID); 136 vm_vcpu_add(vm, VCPU_ID);
148 vcpu_set_cpuid(vm, VCPU_ID, kvm_get_supported_cpuid()); 137 vcpu_set_cpuid(vm, VCPU_ID, kvm_get_supported_cpuid());
149 vcpu_ioctl(vm, VCPU_ID, KVM_ENABLE_CAP, &enable_evmcs_cap); 138 vcpu_enable_evmcs(vm, VCPU_ID);
150 vcpu_load_state(vm, VCPU_ID, state); 139 vcpu_load_state(vm, VCPU_ID, state);
151 run = vcpu_state(vm, VCPU_ID); 140 run = vcpu_state(vm, VCPU_ID);
152 free(state); 141 free(state);
diff --git a/tools/testing/selftests/kvm/x86_64/hyperv_cpuid.c b/tools/testing/selftests/kvm/x86_64/hyperv_cpuid.c
index f72b3043db0e..ee59831fbc98 100644
--- a/tools/testing/selftests/kvm/x86_64/hyperv_cpuid.c
+++ b/tools/testing/selftests/kvm/x86_64/hyperv_cpuid.c
@@ -18,6 +18,7 @@
18#include "test_util.h" 18#include "test_util.h"
19#include "kvm_util.h" 19#include "kvm_util.h"
20#include "processor.h" 20#include "processor.h"
21#include "vmx.h"
21 22
22#define VCPU_ID 0 23#define VCPU_ID 0
23 24
@@ -106,12 +107,7 @@ int main(int argc, char *argv[])
106{ 107{
107 struct kvm_vm *vm; 108 struct kvm_vm *vm;
108 int rv; 109 int rv;
109 uint16_t evmcs_ver;
110 struct kvm_cpuid2 *hv_cpuid_entries; 110 struct kvm_cpuid2 *hv_cpuid_entries;
111 struct kvm_enable_cap enable_evmcs_cap = {
112 .cap = KVM_CAP_HYPERV_ENLIGHTENED_VMCS,
113 .args[0] = (unsigned long)&evmcs_ver
114 };
115 111
116 /* Tell stdout not to buffer its content */ 112 /* Tell stdout not to buffer its content */
117 setbuf(stdout, NULL); 113 setbuf(stdout, NULL);
@@ -136,14 +132,14 @@ int main(int argc, char *argv[])
136 132
137 free(hv_cpuid_entries); 133 free(hv_cpuid_entries);
138 134
139 rv = _vcpu_ioctl(vm, VCPU_ID, KVM_ENABLE_CAP, &enable_evmcs_cap); 135 if (!kvm_check_cap(KVM_CAP_HYPERV_ENLIGHTENED_VMCS)) {
140
141 if (rv) {
142 fprintf(stderr, 136 fprintf(stderr,
143 "Enlightened VMCS is unsupported, skip related test\n"); 137 "Enlightened VMCS is unsupported, skip related test\n");
144 goto vm_free; 138 goto vm_free;
145 } 139 }
146 140
141 vcpu_enable_evmcs(vm, VCPU_ID);
142
147 hv_cpuid_entries = kvm_get_supported_hv_cpuid(vm); 143 hv_cpuid_entries = kvm_get_supported_hv_cpuid(vm);
148 if (!hv_cpuid_entries) 144 if (!hv_cpuid_entries)
149 return 1; 145 return 1;
diff --git a/tools/testing/selftests/kvm/x86_64/platform_info_test.c b/tools/testing/selftests/kvm/x86_64/platform_info_test.c
index 40050e44ec0a..f9334bd3cce9 100644
--- a/tools/testing/selftests/kvm/x86_64/platform_info_test.c
+++ b/tools/testing/selftests/kvm/x86_64/platform_info_test.c
@@ -99,8 +99,8 @@ int main(int argc, char *argv[])
99 msr_platform_info = vcpu_get_msr(vm, VCPU_ID, MSR_PLATFORM_INFO); 99 msr_platform_info = vcpu_get_msr(vm, VCPU_ID, MSR_PLATFORM_INFO);
100 vcpu_set_msr(vm, VCPU_ID, MSR_PLATFORM_INFO, 100 vcpu_set_msr(vm, VCPU_ID, MSR_PLATFORM_INFO,
101 msr_platform_info | MSR_PLATFORM_INFO_MAX_TURBO_RATIO); 101 msr_platform_info | MSR_PLATFORM_INFO_MAX_TURBO_RATIO);
102 test_msr_platform_info_disabled(vm);
103 test_msr_platform_info_enabled(vm); 102 test_msr_platform_info_enabled(vm);
103 test_msr_platform_info_disabled(vm);
104 vcpu_set_msr(vm, VCPU_ID, MSR_PLATFORM_INFO, msr_platform_info); 104 vcpu_set_msr(vm, VCPU_ID, MSR_PLATFORM_INFO, msr_platform_info);
105 105
106 kvm_vm_free(vm); 106 kvm_vm_free(vm);
diff --git a/tools/testing/selftests/kvm/x86_64/vmx_set_nested_state_test.c b/tools/testing/selftests/kvm/x86_64/vmx_set_nested_state_test.c
index ed7218d166da..853e370e8a39 100644
--- a/tools/testing/selftests/kvm/x86_64/vmx_set_nested_state_test.c
+++ b/tools/testing/selftests/kvm/x86_64/vmx_set_nested_state_test.c
@@ -25,24 +25,17 @@
25#define VMCS12_REVISION 0x11e57ed0 25#define VMCS12_REVISION 0x11e57ed0
26#define VCPU_ID 5 26#define VCPU_ID 5
27 27
28bool have_evmcs;
29
28void test_nested_state(struct kvm_vm *vm, struct kvm_nested_state *state) 30void test_nested_state(struct kvm_vm *vm, struct kvm_nested_state *state)
29{ 31{
30 volatile struct kvm_run *run;
31
32 vcpu_nested_state_set(vm, VCPU_ID, state, false); 32 vcpu_nested_state_set(vm, VCPU_ID, state, false);
33 run = vcpu_state(vm, VCPU_ID);
34 vcpu_run(vm, VCPU_ID);
35 TEST_ASSERT(run->exit_reason == KVM_EXIT_SHUTDOWN,
36 "Got exit_reason other than KVM_EXIT_SHUTDOWN: %u (%s),\n",
37 run->exit_reason,
38 exit_reason_str(run->exit_reason));
39} 33}
40 34
41void test_nested_state_expect_errno(struct kvm_vm *vm, 35void test_nested_state_expect_errno(struct kvm_vm *vm,
42 struct kvm_nested_state *state, 36 struct kvm_nested_state *state,
43 int expected_errno) 37 int expected_errno)
44{ 38{
45 volatile struct kvm_run *run;
46 int rv; 39 int rv;
47 40
48 rv = vcpu_nested_state_set(vm, VCPU_ID, state, true); 41 rv = vcpu_nested_state_set(vm, VCPU_ID, state, true);
@@ -50,12 +43,6 @@ void test_nested_state_expect_errno(struct kvm_vm *vm,
50 "Expected %s (%d) from vcpu_nested_state_set but got rv: %i errno: %s (%d)", 43 "Expected %s (%d) from vcpu_nested_state_set but got rv: %i errno: %s (%d)",
51 strerror(expected_errno), expected_errno, rv, strerror(errno), 44 strerror(expected_errno), expected_errno, rv, strerror(errno),
52 errno); 45 errno);
53 run = vcpu_state(vm, VCPU_ID);
54 vcpu_run(vm, VCPU_ID);
55 TEST_ASSERT(run->exit_reason == KVM_EXIT_SHUTDOWN,
56 "Got exit_reason other than KVM_EXIT_SHUTDOWN: %u (%s),\n",
57 run->exit_reason,
58 exit_reason_str(run->exit_reason));
59} 46}
60 47
61void test_nested_state_expect_einval(struct kvm_vm *vm, 48void test_nested_state_expect_einval(struct kvm_vm *vm,
@@ -90,8 +77,9 @@ void set_default_vmx_state(struct kvm_nested_state *state, int size)
90{ 77{
91 memset(state, 0, size); 78 memset(state, 0, size);
92 state->flags = KVM_STATE_NESTED_GUEST_MODE | 79 state->flags = KVM_STATE_NESTED_GUEST_MODE |
93 KVM_STATE_NESTED_RUN_PENDING | 80 KVM_STATE_NESTED_RUN_PENDING;
94 KVM_STATE_NESTED_EVMCS; 81 if (have_evmcs)
82 state->flags |= KVM_STATE_NESTED_EVMCS;
95 state->format = 0; 83 state->format = 0;
96 state->size = size; 84 state->size = size;
97 state->hdr.vmx.vmxon_pa = 0x1000; 85 state->hdr.vmx.vmxon_pa = 0x1000;
@@ -141,13 +129,19 @@ void test_vmx_nested_state(struct kvm_vm *vm)
141 /* 129 /*
142 * Setting vmxon_pa == -1ull and vmcs_pa == -1ull exits early without 130 * Setting vmxon_pa == -1ull and vmcs_pa == -1ull exits early without
143 * setting the nested state but flags other than eVMCS must be clear. 131 * setting the nested state but flags other than eVMCS must be clear.
132 * The eVMCS flag can be set if the enlightened VMCS capability has
133 * been enabled.
144 */ 134 */
145 set_default_vmx_state(state, state_sz); 135 set_default_vmx_state(state, state_sz);
146 state->hdr.vmx.vmxon_pa = -1ull; 136 state->hdr.vmx.vmxon_pa = -1ull;
147 state->hdr.vmx.vmcs12_pa = -1ull; 137 state->hdr.vmx.vmcs12_pa = -1ull;
148 test_nested_state_expect_einval(vm, state); 138 test_nested_state_expect_einval(vm, state);
149 139
150 state->flags = KVM_STATE_NESTED_EVMCS; 140 state->flags &= KVM_STATE_NESTED_EVMCS;
141 if (have_evmcs) {
142 test_nested_state_expect_einval(vm, state);
143 vcpu_enable_evmcs(vm, VCPU_ID);
144 }
151 test_nested_state(vm, state); 145 test_nested_state(vm, state);
152 146
153 /* It is invalid to have vmxon_pa == -1ull and SMM flags non-zero. */ 147 /* It is invalid to have vmxon_pa == -1ull and SMM flags non-zero. */
@@ -232,6 +226,8 @@ int main(int argc, char *argv[])
232 struct kvm_nested_state state; 226 struct kvm_nested_state state;
233 struct kvm_cpuid_entry2 *entry = kvm_get_supported_cpuid_entry(1); 227 struct kvm_cpuid_entry2 *entry = kvm_get_supported_cpuid_entry(1);
234 228
229 have_evmcs = kvm_check_cap(KVM_CAP_HYPERV_ENLIGHTENED_VMCS);
230
235 if (!kvm_check_cap(KVM_CAP_NESTED_STATE)) { 231 if (!kvm_check_cap(KVM_CAP_NESTED_STATE)) {
236 printf("KVM_CAP_NESTED_STATE not available, skipping test\n"); 232 printf("KVM_CAP_NESTED_STATE not available, skipping test\n");
237 exit(KSFT_SKIP); 233 exit(KSFT_SKIP);
diff --git a/tools/testing/selftests/livepatch/functions.sh b/tools/testing/selftests/livepatch/functions.sh
index 30195449c63c..79b0affd21fb 100644
--- a/tools/testing/selftests/livepatch/functions.sh
+++ b/tools/testing/selftests/livepatch/functions.sh
@@ -13,6 +13,14 @@ function log() {
13 echo "$1" > /dev/kmsg 13 echo "$1" > /dev/kmsg
14} 14}
15 15
16# skip(msg) - testing can't proceed
17# msg - explanation
18function skip() {
19 log "SKIP: $1"
20 echo "SKIP: $1" >&2
21 exit 4
22}
23
16# die(msg) - game over, man 24# die(msg) - game over, man
17# msg - dying words 25# msg - dying words
18function die() { 26function die() {
@@ -21,13 +29,27 @@ function die() {
21 exit 1 29 exit 1
22} 30}
23 31
24# set_dynamic_debug() - setup kernel dynamic debug 32function push_dynamic_debug() {
25# TODO - push and pop this config? 33 DYNAMIC_DEBUG=$(grep '^kernel/livepatch' /sys/kernel/debug/dynamic_debug/control | \
34 awk -F'[: ]' '{print "file " $1 " line " $2 " " $4}')
35}
36
37function pop_dynamic_debug() {
38 if [[ -n "$DYNAMIC_DEBUG" ]]; then
39 echo -n "$DYNAMIC_DEBUG" > /sys/kernel/debug/dynamic_debug/control
40 fi
41}
42
43# set_dynamic_debug() - save the current dynamic debug config and tweak
44# it for the self-tests. Set a script exit trap
45# that restores the original config.
26function set_dynamic_debug() { 46function set_dynamic_debug() {
27 cat << EOF > /sys/kernel/debug/dynamic_debug/control 47 push_dynamic_debug
28file kernel/livepatch/* +p 48 trap pop_dynamic_debug EXIT INT TERM HUP
29func klp_try_switch_task -p 49 cat <<-EOF > /sys/kernel/debug/dynamic_debug/control
30EOF 50 file kernel/livepatch/* +p
51 func klp_try_switch_task -p
52 EOF
31} 53}
32 54
33# loop_until(cmd) - loop a command until it is successful or $MAX_RETRIES, 55# loop_until(cmd) - loop a command until it is successful or $MAX_RETRIES,
@@ -43,6 +65,12 @@ function loop_until() {
43 done 65 done
44} 66}
45 67
68function assert_mod() {
69 local mod="$1"
70
71 modprobe --dry-run "$mod" &>/dev/null
72}
73
46function is_livepatch_mod() { 74function is_livepatch_mod() {
47 local mod="$1" 75 local mod="$1"
48 76
@@ -75,6 +103,9 @@ function __load_mod() {
75function load_mod() { 103function load_mod() {
76 local mod="$1"; shift 104 local mod="$1"; shift
77 105
106 assert_mod "$mod" ||
107 skip "unable to load module ${mod}, verify CONFIG_TEST_LIVEPATCH=m and run self-tests as root"
108
78 is_livepatch_mod "$mod" && 109 is_livepatch_mod "$mod" &&
79 die "use load_lp() to load the livepatch module $mod" 110 die "use load_lp() to load the livepatch module $mod"
80 111
@@ -88,6 +119,9 @@ function load_mod() {
88function load_lp_nowait() { 119function load_lp_nowait() {
89 local mod="$1"; shift 120 local mod="$1"; shift
90 121
122 assert_mod "$mod" ||
123 skip "unable to load module ${mod}, verify CONFIG_TEST_LIVEPATCH=m and run self-tests as root"
124
91 is_livepatch_mod "$mod" || 125 is_livepatch_mod "$mod" ||
92 die "module $mod is not a livepatch" 126 die "module $mod is not a livepatch"
93 127
diff --git a/tools/testing/selftests/net/.gitignore b/tools/testing/selftests/net/.gitignore
index 4ce0bc1612f5..c7cced739c34 100644
--- a/tools/testing/selftests/net/.gitignore
+++ b/tools/testing/selftests/net/.gitignore
@@ -17,7 +17,7 @@ tcp_inq
17tls 17tls
18txring_overwrite 18txring_overwrite
19ip_defrag 19ip_defrag
20ipv6_flowlabel
21ipv6_flowlabel_mgr
20so_txtime 22so_txtime
21flowlabel
22flowlabel_mgr
23tcp_fastopen_backup_key 23tcp_fastopen_backup_key
diff --git a/tools/testing/selftests/net/forwarding/gre_multipath.sh b/tools/testing/selftests/net/forwarding/gre_multipath.sh
index cca2baa03fb8..a8d8e8b3dc81 100755
--- a/tools/testing/selftests/net/forwarding/gre_multipath.sh
+++ b/tools/testing/selftests/net/forwarding/gre_multipath.sh
@@ -93,18 +93,10 @@ sw1_create()
93 ip route add vrf v$ol1 192.0.2.16/28 \ 93 ip route add vrf v$ol1 192.0.2.16/28 \
94 nexthop dev g1a \ 94 nexthop dev g1a \
95 nexthop dev g1b 95 nexthop dev g1b
96
97 tc qdisc add dev $ul1 clsact
98 tc filter add dev $ul1 egress pref 111 prot ipv4 \
99 flower dst_ip 192.0.2.66 action pass
100 tc filter add dev $ul1 egress pref 222 prot ipv4 \
101 flower dst_ip 192.0.2.82 action pass
102} 96}
103 97
104sw1_destroy() 98sw1_destroy()
105{ 99{
106 tc qdisc del dev $ul1 clsact
107
108 ip route del vrf v$ol1 192.0.2.16/28 100 ip route del vrf v$ol1 192.0.2.16/28
109 101
110 ip route del vrf v$ol1 192.0.2.82/32 via 192.0.2.146 102 ip route del vrf v$ol1 192.0.2.82/32 via 192.0.2.146
@@ -139,10 +131,18 @@ sw2_create()
139 ip route add vrf v$ol2 192.0.2.0/28 \ 131 ip route add vrf v$ol2 192.0.2.0/28 \
140 nexthop dev g2a \ 132 nexthop dev g2a \
141 nexthop dev g2b 133 nexthop dev g2b
134
135 tc qdisc add dev $ul2 clsact
136 tc filter add dev $ul2 ingress pref 111 prot 802.1Q \
137 flower vlan_id 111 action pass
138 tc filter add dev $ul2 ingress pref 222 prot 802.1Q \
139 flower vlan_id 222 action pass
142} 140}
143 141
144sw2_destroy() 142sw2_destroy()
145{ 143{
144 tc qdisc del dev $ul2 clsact
145
146 ip route del vrf v$ol2 192.0.2.0/28 146 ip route del vrf v$ol2 192.0.2.0/28
147 147
148 ip route del vrf v$ol2 192.0.2.81/32 via 192.0.2.145 148 ip route del vrf v$ol2 192.0.2.81/32 via 192.0.2.145
@@ -187,12 +187,16 @@ setup_prepare()
187 sw1_create 187 sw1_create
188 sw2_create 188 sw2_create
189 h2_create 189 h2_create
190
191 forwarding_enable
190} 192}
191 193
192cleanup() 194cleanup()
193{ 195{
194 pre_cleanup 196 pre_cleanup
195 197
198 forwarding_restore
199
196 h2_destroy 200 h2_destroy
197 sw2_destroy 201 sw2_destroy
198 sw1_destroy 202 sw1_destroy
@@ -211,15 +215,15 @@ multipath4_test()
211 nexthop dev g1a weight $weight1 \ 215 nexthop dev g1a weight $weight1 \
212 nexthop dev g1b weight $weight2 216 nexthop dev g1b weight $weight2
213 217
214 local t0_111=$(tc_rule_stats_get $ul1 111 egress) 218 local t0_111=$(tc_rule_stats_get $ul2 111 ingress)
215 local t0_222=$(tc_rule_stats_get $ul1 222 egress) 219 local t0_222=$(tc_rule_stats_get $ul2 222 ingress)
216 220
217 ip vrf exec v$h1 \ 221 ip vrf exec v$h1 \
218 $MZ $h1 -q -p 64 -A 192.0.2.1 -B 192.0.2.18 \ 222 $MZ $h1 -q -p 64 -A 192.0.2.1 -B 192.0.2.18 \
219 -d 1msec -t udp "sp=1024,dp=0-32768" 223 -d 1msec -t udp "sp=1024,dp=0-32768"
220 224
221 local t1_111=$(tc_rule_stats_get $ul1 111 egress) 225 local t1_111=$(tc_rule_stats_get $ul2 111 ingress)
222 local t1_222=$(tc_rule_stats_get $ul1 222 egress) 226 local t1_222=$(tc_rule_stats_get $ul2 222 ingress)
223 227
224 local d111=$((t1_111 - t0_111)) 228 local d111=$((t1_111 - t0_111))
225 local d222=$((t1_222 - t0_222)) 229 local d222=$((t1_222 - t0_222))
diff --git a/tools/testing/selftests/net/tcp_fastopen_backup_key.sh b/tools/testing/selftests/net/tcp_fastopen_backup_key.sh
index 41476399e184..f6e65674b83c 100755
--- a/tools/testing/selftests/net/tcp_fastopen_backup_key.sh
+++ b/tools/testing/selftests/net/tcp_fastopen_backup_key.sh
@@ -30,7 +30,7 @@ do_test() {
30 ip netns exec "${NETNS}" ./tcp_fastopen_backup_key "$1" 30 ip netns exec "${NETNS}" ./tcp_fastopen_backup_key "$1"
31 val=$(ip netns exec "${NETNS}" nstat -az | \ 31 val=$(ip netns exec "${NETNS}" nstat -az | \
32 grep TcpExtTCPFastOpenPassiveFail | awk '{print $2}') 32 grep TcpExtTCPFastOpenPassiveFail | awk '{print $2}')
33 if [ $val -ne 0 ]; then 33 if [ "$val" != 0 ]; then
34 echo "FAIL: TcpExtTCPFastOpenPassiveFail non-zero" 34 echo "FAIL: TcpExtTCPFastOpenPassiveFail non-zero"
35 return 1 35 return 1
36 fi 36 fi
diff --git a/tools/testing/selftests/net/tls.c b/tools/testing/selftests/net/tls.c
index 090fff9dbc48..4c285b6e1db8 100644
--- a/tools/testing/selftests/net/tls.c
+++ b/tools/testing/selftests/net/tls.c
@@ -25,6 +25,80 @@
25#define TLS_PAYLOAD_MAX_LEN 16384 25#define TLS_PAYLOAD_MAX_LEN 16384
26#define SOL_TLS 282 26#define SOL_TLS 282
27 27
28#ifndef ENOTSUPP
29#define ENOTSUPP 524
30#endif
31
32FIXTURE(tls_basic)
33{
34 int fd, cfd;
35 bool notls;
36};
37
38FIXTURE_SETUP(tls_basic)
39{
40 struct sockaddr_in addr;
41 socklen_t len;
42 int sfd, ret;
43
44 self->notls = false;
45 len = sizeof(addr);
46
47 addr.sin_family = AF_INET;
48 addr.sin_addr.s_addr = htonl(INADDR_ANY);
49 addr.sin_port = 0;
50
51 self->fd = socket(AF_INET, SOCK_STREAM, 0);
52 sfd = socket(AF_INET, SOCK_STREAM, 0);
53
54 ret = bind(sfd, &addr, sizeof(addr));
55 ASSERT_EQ(ret, 0);
56 ret = listen(sfd, 10);
57 ASSERT_EQ(ret, 0);
58
59 ret = getsockname(sfd, &addr, &len);
60 ASSERT_EQ(ret, 0);
61
62 ret = connect(self->fd, &addr, sizeof(addr));
63 ASSERT_EQ(ret, 0);
64
65 self->cfd = accept(sfd, &addr, &len);
66 ASSERT_GE(self->cfd, 0);
67
68 close(sfd);
69
70 ret = setsockopt(self->fd, IPPROTO_TCP, TCP_ULP, "tls", sizeof("tls"));
71 if (ret != 0) {
72 ASSERT_EQ(errno, ENOENT);
73 self->notls = true;
74 printf("Failure setting TCP_ULP, testing without tls\n");
75 return;
76 }
77
78 ret = setsockopt(self->cfd, IPPROTO_TCP, TCP_ULP, "tls", sizeof("tls"));
79 ASSERT_EQ(ret, 0);
80}
81
82FIXTURE_TEARDOWN(tls_basic)
83{
84 close(self->fd);
85 close(self->cfd);
86}
87
88/* Send some data through with ULP but no keys */
89TEST_F(tls_basic, base_base)
90{
91 char const *test_str = "test_read";
92 int send_len = 10;
93 char buf[10];
94
95 ASSERT_EQ(strlen(test_str) + 1, send_len);
96
97 EXPECT_EQ(send(self->fd, test_str, send_len, 0), send_len);
98 EXPECT_NE(recv(self->cfd, buf, send_len, 0), -1);
99 EXPECT_EQ(memcmp(buf, test_str, send_len), 0);
100};
101
28FIXTURE(tls) 102FIXTURE(tls)
29{ 103{
30 int fd, cfd; 104 int fd, cfd;
@@ -165,6 +239,16 @@ TEST_F(tls, msg_more)
165 EXPECT_EQ(memcmp(buf, test_str, send_len), 0); 239 EXPECT_EQ(memcmp(buf, test_str, send_len), 0);
166} 240}
167 241
242TEST_F(tls, msg_more_unsent)
243{
244 char const *test_str = "test_read";
245 int send_len = 10;
246 char buf[10];
247
248 EXPECT_EQ(send(self->fd, test_str, send_len, MSG_MORE), send_len);
249 EXPECT_EQ(recv(self->cfd, buf, send_len, MSG_DONTWAIT), -1);
250}
251
168TEST_F(tls, sendmsg_single) 252TEST_F(tls, sendmsg_single)
169{ 253{
170 struct msghdr msg; 254 struct msghdr msg;
@@ -610,6 +694,42 @@ TEST_F(tls, recv_lowat)
610 EXPECT_EQ(memcmp(send_mem, recv_mem + 10, 5), 0); 694 EXPECT_EQ(memcmp(send_mem, recv_mem + 10, 5), 0);
611} 695}
612 696
697TEST_F(tls, bidir)
698{
699 char const *test_str = "test_read";
700 int send_len = 10;
701 char buf[10];
702 int ret;
703
704 if (!self->notls) {
705 struct tls12_crypto_info_aes_gcm_128 tls12;
706
707 memset(&tls12, 0, sizeof(tls12));
708 tls12.info.version = TLS_1_3_VERSION;
709 tls12.info.cipher_type = TLS_CIPHER_AES_GCM_128;
710
711 ret = setsockopt(self->fd, SOL_TLS, TLS_RX, &tls12,
712 sizeof(tls12));
713 ASSERT_EQ(ret, 0);
714
715 ret = setsockopt(self->cfd, SOL_TLS, TLS_TX, &tls12,
716 sizeof(tls12));
717 ASSERT_EQ(ret, 0);
718 }
719
720 ASSERT_EQ(strlen(test_str) + 1, send_len);
721
722 EXPECT_EQ(send(self->fd, test_str, send_len, 0), send_len);
723 EXPECT_NE(recv(self->cfd, buf, send_len, 0), -1);
724 EXPECT_EQ(memcmp(buf, test_str, send_len), 0);
725
726 memset(buf, 0, sizeof(buf));
727
728 EXPECT_EQ(send(self->cfd, test_str, send_len, 0), send_len);
729 EXPECT_NE(recv(self->fd, buf, send_len, 0), -1);
730 EXPECT_EQ(memcmp(buf, test_str, send_len), 0);
731};
732
613TEST_F(tls, pollin) 733TEST_F(tls, pollin)
614{ 734{
615 char const *test_str = "test_poll"; 735 char const *test_str = "test_poll";
@@ -837,6 +957,109 @@ TEST_F(tls, control_msg)
837 EXPECT_EQ(memcmp(buf, test_str, send_len), 0); 957 EXPECT_EQ(memcmp(buf, test_str, send_len), 0);
838} 958}
839 959
960TEST_F(tls, shutdown)
961{
962 char const *test_str = "test_read";
963 int send_len = 10;
964 char buf[10];
965
966 ASSERT_EQ(strlen(test_str) + 1, send_len);
967
968 EXPECT_EQ(send(self->fd, test_str, send_len, 0), send_len);
969 EXPECT_NE(recv(self->cfd, buf, send_len, 0), -1);
970 EXPECT_EQ(memcmp(buf, test_str, send_len), 0);
971
972 shutdown(self->fd, SHUT_RDWR);
973 shutdown(self->cfd, SHUT_RDWR);
974}
975
976TEST_F(tls, shutdown_unsent)
977{
978 char const *test_str = "test_read";
979 int send_len = 10;
980
981 EXPECT_EQ(send(self->fd, test_str, send_len, MSG_MORE), send_len);
982
983 shutdown(self->fd, SHUT_RDWR);
984 shutdown(self->cfd, SHUT_RDWR);
985}
986
987TEST_F(tls, shutdown_reuse)
988{
989 struct sockaddr_in addr;
990 int ret;
991
992 shutdown(self->fd, SHUT_RDWR);
993 shutdown(self->cfd, SHUT_RDWR);
994 close(self->cfd);
995
996 addr.sin_family = AF_INET;
997 addr.sin_addr.s_addr = htonl(INADDR_ANY);
998 addr.sin_port = 0;
999
1000 ret = bind(self->fd, &addr, sizeof(addr));
1001 EXPECT_EQ(ret, 0);
1002 ret = listen(self->fd, 10);
1003 EXPECT_EQ(ret, -1);
1004 EXPECT_EQ(errno, EINVAL);
1005
1006 ret = connect(self->fd, &addr, sizeof(addr));
1007 EXPECT_EQ(ret, -1);
1008 EXPECT_EQ(errno, EISCONN);
1009}
1010
1011TEST(non_established) {
1012 struct tls12_crypto_info_aes_gcm_256 tls12;
1013 struct sockaddr_in addr;
1014 int sfd, ret, fd;
1015 socklen_t len;
1016
1017 len = sizeof(addr);
1018
1019 memset(&tls12, 0, sizeof(tls12));
1020 tls12.info.version = TLS_1_2_VERSION;
1021 tls12.info.cipher_type = TLS_CIPHER_AES_GCM_256;
1022
1023 addr.sin_family = AF_INET;
1024 addr.sin_addr.s_addr = htonl(INADDR_ANY);
1025 addr.sin_port = 0;
1026
1027 fd = socket(AF_INET, SOCK_STREAM, 0);
1028 sfd = socket(AF_INET, SOCK_STREAM, 0);
1029
1030 ret = bind(sfd, &addr, sizeof(addr));
1031 ASSERT_EQ(ret, 0);
1032 ret = listen(sfd, 10);
1033 ASSERT_EQ(ret, 0);
1034
1035 ret = setsockopt(fd, IPPROTO_TCP, TCP_ULP, "tls", sizeof("tls"));
1036 EXPECT_EQ(ret, -1);
1037 /* TLS ULP not supported */
1038 if (errno == ENOENT)
1039 return;
1040 EXPECT_EQ(errno, ENOTSUPP);
1041
1042 ret = setsockopt(sfd, IPPROTO_TCP, TCP_ULP, "tls", sizeof("tls"));
1043 EXPECT_EQ(ret, -1);
1044 EXPECT_EQ(errno, ENOTSUPP);
1045
1046 ret = getsockname(sfd, &addr, &len);
1047 ASSERT_EQ(ret, 0);
1048
1049 ret = connect(fd, &addr, sizeof(addr));
1050 ASSERT_EQ(ret, 0);
1051
1052 ret = setsockopt(fd, IPPROTO_TCP, TCP_ULP, "tls", sizeof("tls"));
1053 ASSERT_EQ(ret, 0);
1054
1055 ret = setsockopt(fd, IPPROTO_TCP, TCP_ULP, "tls", sizeof("tls"));
1056 EXPECT_EQ(ret, -1);
1057 EXPECT_EQ(errno, EEXIST);
1058
1059 close(fd);
1060 close(sfd);
1061}
1062
840TEST(keysizes) { 1063TEST(keysizes) {
841 struct tls12_crypto_info_aes_gcm_256 tls12; 1064 struct tls12_crypto_info_aes_gcm_256 tls12;
842 struct sockaddr_in addr; 1065 struct sockaddr_in addr;
diff --git a/tools/testing/selftests/netfilter/nft_flowtable.sh b/tools/testing/selftests/netfilter/nft_flowtable.sh
index fe52488a6f72..16571ac1dab4 100755
--- a/tools/testing/selftests/netfilter/nft_flowtable.sh
+++ b/tools/testing/selftests/netfilter/nft_flowtable.sh
@@ -321,4 +321,52 @@ else
321 ip netns exec nsr1 nft list ruleset 321 ip netns exec nsr1 nft list ruleset
322fi 322fi
323 323
324KEY_SHA="0x"$(ps -xaf | sha1sum | cut -d " " -f 1)
325KEY_AES="0x"$(ps -xaf | md5sum | cut -d " " -f 1)
326SPI1=$RANDOM
327SPI2=$RANDOM
328
329if [ $SPI1 -eq $SPI2 ]; then
330 SPI2=$((SPI2+1))
331fi
332
333do_esp() {
334 local ns=$1
335 local me=$2
336 local remote=$3
337 local lnet=$4
338 local rnet=$5
339 local spi_out=$6
340 local spi_in=$7
341
342 ip -net $ns xfrm state add src $remote dst $me proto esp spi $spi_in enc aes $KEY_AES auth sha1 $KEY_SHA mode tunnel sel src $rnet dst $lnet
343 ip -net $ns xfrm state add src $me dst $remote proto esp spi $spi_out enc aes $KEY_AES auth sha1 $KEY_SHA mode tunnel sel src $lnet dst $rnet
344
345 # to encrypt packets as they go out (includes forwarded packets that need encapsulation)
346 ip -net $ns xfrm policy add src $lnet dst $rnet dir out tmpl src $me dst $remote proto esp mode tunnel priority 1 action allow
347 # to fwd decrypted packets after esp processing:
348 ip -net $ns xfrm policy add src $rnet dst $lnet dir fwd tmpl src $remote dst $me proto esp mode tunnel priority 1 action allow
349
350}
351
352do_esp nsr1 192.168.10.1 192.168.10.2 10.0.1.0/24 10.0.2.0/24 $SPI1 $SPI2
353
354do_esp nsr2 192.168.10.2 192.168.10.1 10.0.2.0/24 10.0.1.0/24 $SPI2 $SPI1
355
356ip netns exec nsr1 nft delete table ip nat
357
358# restore default routes
359ip -net ns2 route del 192.168.10.1 via 10.0.2.1
360ip -net ns2 route add default via 10.0.2.1
361ip -net ns2 route add default via dead:2::1
362
363test_tcp_forwarding ns1 ns2
364if [ $? -eq 0 ] ;then
365 echo "PASS: ipsec tunnel mode for ns1/ns2"
366else
367 echo "FAIL: ipsec tunnel mode for ns1/ns2"
368 ip netns exec nsr1 nft list ruleset 1>&2
369 ip netns exec nsr1 cat /proc/net/xfrm_stat 1>&2
370fi
371
324exit $ret 372exit $ret
diff --git a/tools/testing/selftests/pidfd/pidfd_test.c b/tools/testing/selftests/pidfd/pidfd_test.c
index 7eaa8a3de262..b632965e60eb 100644
--- a/tools/testing/selftests/pidfd/pidfd_test.c
+++ b/tools/testing/selftests/pidfd/pidfd_test.c
@@ -339,13 +339,9 @@ static int test_pidfd_send_signal_syscall_support(void)
339 339
340 ret = sys_pidfd_send_signal(pidfd, 0, NULL, 0); 340 ret = sys_pidfd_send_signal(pidfd, 0, NULL, 0);
341 if (ret < 0) { 341 if (ret < 0) {
342 /*
343 * pidfd_send_signal() will currently return ENOSYS when
344 * CONFIG_PROC_FS is not set.
345 */
346 if (errno == ENOSYS) 342 if (errno == ENOSYS)
347 ksft_exit_skip( 343 ksft_exit_skip(
348 "%s test: pidfd_send_signal() syscall not supported (Ensure that CONFIG_PROC_FS=y is set)\n", 344 "%s test: pidfd_send_signal() syscall not supported\n",
349 test_name); 345 test_name);
350 346
351 ksft_exit_fail_msg("%s test: Failed to send signal\n", 347 ksft_exit_fail_msg("%s test: Failed to send signal\n",
diff --git a/tools/testing/selftests/tc-testing/plugin-lib/nsPlugin.py b/tools/testing/selftests/tc-testing/plugin-lib/nsPlugin.py
index affa7f2d9670..9539cffa9e5e 100644
--- a/tools/testing/selftests/tc-testing/plugin-lib/nsPlugin.py
+++ b/tools/testing/selftests/tc-testing/plugin-lib/nsPlugin.py
@@ -64,7 +64,7 @@ class SubPlugin(TdcPlugin):
64 cmdlist.insert(0, self.args.NAMES['NS']) 64 cmdlist.insert(0, self.args.NAMES['NS'])
65 cmdlist.insert(0, 'exec') 65 cmdlist.insert(0, 'exec')
66 cmdlist.insert(0, 'netns') 66 cmdlist.insert(0, 'netns')
67 cmdlist.insert(0, 'ip') 67 cmdlist.insert(0, self.args.NAMES['IP'])
68 else: 68 else:
69 pass 69 pass
70 70
@@ -78,16 +78,16 @@ class SubPlugin(TdcPlugin):
78 return command 78 return command
79 79
80 def _ports_create(self): 80 def _ports_create(self):
81 cmd = 'ip link add $DEV0 type veth peer name $DEV1' 81 cmd = '$IP link add $DEV0 type veth peer name $DEV1'
82 self._exec_cmd('pre', cmd) 82 self._exec_cmd('pre', cmd)
83 cmd = 'ip link set $DEV0 up' 83 cmd = '$IP link set $DEV0 up'
84 self._exec_cmd('pre', cmd) 84 self._exec_cmd('pre', cmd)
85 if not self.args.namespace: 85 if not self.args.namespace:
86 cmd = 'ip link set $DEV1 up' 86 cmd = '$IP link set $DEV1 up'
87 self._exec_cmd('pre', cmd) 87 self._exec_cmd('pre', cmd)
88 88
89 def _ports_destroy(self): 89 def _ports_destroy(self):
90 cmd = 'ip link del $DEV0' 90 cmd = '$IP link del $DEV0'
91 self._exec_cmd('post', cmd) 91 self._exec_cmd('post', cmd)
92 92
93 def _ns_create(self): 93 def _ns_create(self):
@@ -97,16 +97,16 @@ class SubPlugin(TdcPlugin):
97 ''' 97 '''
98 self._ports_create() 98 self._ports_create()
99 if self.args.namespace: 99 if self.args.namespace:
100 cmd = 'ip netns add {}'.format(self.args.NAMES['NS']) 100 cmd = '$IP netns add {}'.format(self.args.NAMES['NS'])
101 self._exec_cmd('pre', cmd) 101 self._exec_cmd('pre', cmd)
102 cmd = 'ip link set $DEV1 netns {}'.format(self.args.NAMES['NS']) 102 cmd = '$IP link set $DEV1 netns {}'.format(self.args.NAMES['NS'])
103 self._exec_cmd('pre', cmd) 103 self._exec_cmd('pre', cmd)
104 cmd = 'ip -n {} link set $DEV1 up'.format(self.args.NAMES['NS']) 104 cmd = '$IP -n {} link set $DEV1 up'.format(self.args.NAMES['NS'])
105 self._exec_cmd('pre', cmd) 105 self._exec_cmd('pre', cmd)
106 if self.args.device: 106 if self.args.device:
107 cmd = 'ip link set $DEV2 netns {}'.format(self.args.NAMES['NS']) 107 cmd = '$IP link set $DEV2 netns {}'.format(self.args.NAMES['NS'])
108 self._exec_cmd('pre', cmd) 108 self._exec_cmd('pre', cmd)
109 cmd = 'ip -n {} link set $DEV2 up'.format(self.args.NAMES['NS']) 109 cmd = '$IP -n {} link set $DEV2 up'.format(self.args.NAMES['NS'])
110 self._exec_cmd('pre', cmd) 110 self._exec_cmd('pre', cmd)
111 111
112 def _ns_destroy(self): 112 def _ns_destroy(self):
@@ -115,7 +115,7 @@ class SubPlugin(TdcPlugin):
115 devices as well) 115 devices as well)
116 ''' 116 '''
117 if self.args.namespace: 117 if self.args.namespace:
118 cmd = 'ip netns delete {}'.format(self.args.NAMES['NS']) 118 cmd = '$IP netns delete {}'.format(self.args.NAMES['NS'])
119 self._exec_cmd('post', cmd) 119 self._exec_cmd('post', cmd)
120 120
121 def _exec_cmd(self, stage, command): 121 def _exec_cmd(self, stage, command):
diff --git a/tools/testing/selftests/tc-testing/tc-tests/actions/skbedit.json b/tools/testing/selftests/tc-testing/tc-tests/actions/skbedit.json
index bf5ebf59c2d4..9cdd2e31ac2c 100644
--- a/tools/testing/selftests/tc-testing/tc-tests/actions/skbedit.json
+++ b/tools/testing/selftests/tc-testing/tc-tests/actions/skbedit.json
@@ -670,5 +670,52 @@
670 "teardown": [ 670 "teardown": [
671 "$TC actions flush action skbedit" 671 "$TC actions flush action skbedit"
672 ] 672 ]
673 },
674 {
675 "id": "630c",
676 "name": "Add batch of 32 skbedit actions with all parameters and cookie",
677 "category": [
678 "actions",
679 "skbedit"
680 ],
681 "setup": [
682 [
683 "$TC actions flush action skbedit",
684 0,
685 1,
686 255
687 ]
688 ],
689 "cmdUnderTest": "bash -c \"for i in \\`seq 1 32\\`; do cmd=\\\"action skbedit queue_mapping 2 priority 10 mark 7/0xaabbccdd ptype host inheritdsfield index \\$i cookie aabbccddeeff112233445566778800a1 \\\"; args=\"\\$args\\$cmd\"; done && $TC actions add \\$args\"",
690 "expExitCode": "0",
691 "verifyCmd": "$TC actions list action skbedit",
692 "matchPattern": "^[ \t]+index [0-9]+ ref",
693 "matchCount": "32",
694 "teardown": [
695 "$TC actions flush action skbedit"
696 ]
697 },
698 {
699 "id": "706d",
700 "name": "Delete batch of 32 skbedit actions with all parameters",
701 "category": [
702 "actions",
703 "skbedit"
704 ],
705 "setup": [
706 [
707 "$TC actions flush action skbedit",
708 0,
709 1,
710 255
711 ],
712 "bash -c \"for i in \\`seq 1 32\\`; do cmd=\\\"action skbedit queue_mapping 2 priority 10 mark 7/0xaabbccdd ptype host inheritdsfield index \\$i \\\"; args=\\\"\\$args\\$cmd\\\"; done && $TC actions add \\$args\""
713 ],
714 "cmdUnderTest": "bash -c \"for i in \\`seq 1 32\\`; do cmd=\\\"action skbedit index \\$i \\\"; args=\"\\$args\\$cmd\"; done && $TC actions del \\$args\"",
715 "expExitCode": "0",
716 "verifyCmd": "$TC actions list action skbedit",
717 "matchPattern": "^[ \t]+index [0-9]+ ref",
718 "matchCount": "0",
719 "teardown": []
673 } 720 }
674] 721]
diff --git a/tools/testing/selftests/tc-testing/tc-tests/actions/vlan.json b/tools/testing/selftests/tc-testing/tc-tests/actions/vlan.json
index cc7c7d758008..6503b1ce091f 100644
--- a/tools/testing/selftests/tc-testing/tc-tests/actions/vlan.json
+++ b/tools/testing/selftests/tc-testing/tc-tests/actions/vlan.json
@@ -713,5 +713,99 @@
713 "teardown": [ 713 "teardown": [
714 "$TC actions flush action vlan" 714 "$TC actions flush action vlan"
715 ] 715 ]
716 },
717 {
718 "id": "294e",
719 "name": "Add batch of 32 vlan push actions with cookie",
720 "category": [
721 "actions",
722 "vlan"
723 ],
724 "setup": [
725 [
726 "$TC actions flush action vlan",
727 0,
728 1,
729 255
730 ]
731 ],
732 "cmdUnderTest": "bash -c \"for i in \\`seq 1 32\\`; do cmd=\\\"action vlan push protocol 802.1q id 4094 priority 7 pipe index \\$i cookie aabbccddeeff112233445566778800a1 \\\"; args=\"\\$args\\$cmd\"; done && $TC actions add \\$args\"",
733 "expExitCode": "0",
734 "verifyCmd": "$TC actions list action vlan",
735 "matchPattern": "^[ \t]+index [0-9]+ ref",
736 "matchCount": "32",
737 "teardown": [
738 "$TC actions flush action vlan"
739 ]
740 },
741 {
742 "id": "56f7",
743 "name": "Delete batch of 32 vlan push actions",
744 "category": [
745 "actions",
746 "vlan"
747 ],
748 "setup": [
749 [
750 "$TC actions flush action vlan",
751 0,
752 1,
753 255
754 ],
755 "bash -c \"for i in \\`seq 1 32\\`; do cmd=\\\"action vlan push protocol 802.1q id 4094 priority 7 pipe index \\$i \\\"; args=\\\"\\$args\\$cmd\\\"; done && $TC actions add \\$args\""
756 ],
757 "cmdUnderTest": "bash -c \"for i in \\`seq 1 32\\`; do cmd=\\\"action vlan index \\$i \\\"; args=\"\\$args\\$cmd\"; done && $TC actions del \\$args\"",
758 "expExitCode": "0",
759 "verifyCmd": "$TC actions list action vlan",
760 "matchPattern": "^[ \t]+index [0-9]+ ref",
761 "matchCount": "0",
762 "teardown": []
763 },
764 {
765 "id": "759f",
766 "name": "Add batch of 32 vlan pop actions with cookie",
767 "category": [
768 "actions",
769 "vlan"
770 ],
771 "setup": [
772 [
773 "$TC actions flush action vlan",
774 0,
775 1,
776 255
777 ]
778 ],
779 "cmdUnderTest": "bash -c \"for i in \\`seq 1 32\\`; do cmd=\\\"action vlan pop continue index \\$i cookie aabbccddeeff112233445566778800a1 \\\"; args=\"\\$args\\$cmd\"; done && $TC actions add \\$args\"",
780 "expExitCode": "0",
781 "verifyCmd": "$TC actions list action vlan",
782 "matchPattern": "^[ \t]+index [0-9]+ ref",
783 "matchCount": "32",
784 "teardown": [
785 "$TC actions flush action vlan"
786 ]
787 },
788 {
789 "id": "c84a",
790 "name": "Delete batch of 32 vlan pop actions",
791 "category": [
792 "actions",
793 "vlan"
794 ],
795 "setup": [
796 [
797 "$TC actions flush action vlan",
798 0,
799 1,
800 255
801 ],
802 "bash -c \"for i in \\`seq 1 32\\`; do cmd=\\\"action vlan pop index \\$i \\\"; args=\\\"\\$args\\$cmd\\\"; done && $TC actions add \\$args\""
803 ],
804 "cmdUnderTest": "bash -c \"for i in \\`seq 1 32\\`; do cmd=\\\"action vlan index \\$i \\\"; args=\"\\$args\\$cmd\"; done && $TC actions del \\$args\"",
805 "expExitCode": "0",
806 "verifyCmd": "$TC actions list action vlan",
807 "matchPattern": "^[ \t]+index [0-9]+ ref",
808 "matchCount": "0",
809 "teardown": []
716 } 810 }
717] 811]
diff --git a/tools/testing/selftests/x86/test_vsyscall.c b/tools/testing/selftests/x86/test_vsyscall.c
index 4602326b8f5b..a4f4d4cf22c3 100644
--- a/tools/testing/selftests/x86/test_vsyscall.c
+++ b/tools/testing/selftests/x86/test_vsyscall.c
@@ -451,7 +451,7 @@ static int test_vsys_x(void)
451 printf("[OK]\tExecuting the vsyscall page failed: #PF(0x%lx)\n", 451 printf("[OK]\tExecuting the vsyscall page failed: #PF(0x%lx)\n",
452 segv_err); 452 segv_err);
453 } else { 453 } else {
454 printf("[FAILT]\tExecution failed with the wrong error: #PF(0x%lx)\n", 454 printf("[FAIL]\tExecution failed with the wrong error: #PF(0x%lx)\n",
455 segv_err); 455 segv_err);
456 return 1; 456 return 1;
457 } 457 }
diff --git a/usr/include/Makefile b/usr/include/Makefile
index aa316d99e035..1fb6abe29b2f 100644
--- a/usr/include/Makefile
+++ b/usr/include/Makefile
@@ -101,10 +101,6 @@ ifeq ($(SRCARCH),riscv)
101header-test- += linux/bpf_perf_event.h 101header-test- += linux/bpf_perf_event.h
102endif 102endif
103 103
104ifeq ($(SRCARCH),s390)
105header-test- += asm/zcrypt.h
106endif
107
108ifeq ($(SRCARCH),sparc) 104ifeq ($(SRCARCH),sparc)
109header-test- += asm/stat.h 105header-test- += asm/stat.h
110header-test- += asm/uctx.h 106header-test- += asm/uctx.h
diff --git a/virt/kvm/arm/arm.c b/virt/kvm/arm/arm.c
index f645c0fbf7ec..35a069815baf 100644
--- a/virt/kvm/arm/arm.c
+++ b/virt/kvm/arm/arm.c
@@ -144,11 +144,6 @@ out_fail_alloc:
144 return ret; 144 return ret;
145} 145}
146 146
147bool kvm_arch_has_vcpu_debugfs(void)
148{
149 return false;
150}
151
152int kvm_arch_create_vcpu_debugfs(struct kvm_vcpu *vcpu) 147int kvm_arch_create_vcpu_debugfs(struct kvm_vcpu *vcpu)
153{ 148{
154 return 0; 149 return 0;
@@ -323,6 +318,17 @@ int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
323 318
324void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu) 319void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu)
325{ 320{
321 /*
322 * If we're about to block (most likely because we've just hit a
323 * WFI), we need to sync back the state of the GIC CPU interface
324 * so that we have the lastest PMR and group enables. This ensures
325 * that kvm_arch_vcpu_runnable has up-to-date data to decide
326 * whether we have pending interrupts.
327 */
328 preempt_disable();
329 kvm_vgic_vmcr_sync(vcpu);
330 preempt_enable();
331
326 kvm_vgic_v4_enable_doorbell(vcpu); 332 kvm_vgic_v4_enable_doorbell(vcpu);
327} 333}
328 334
@@ -340,6 +346,8 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
340 /* Set up the timer */ 346 /* Set up the timer */
341 kvm_timer_vcpu_init(vcpu); 347 kvm_timer_vcpu_init(vcpu);
342 348
349 kvm_pmu_vcpu_init(vcpu);
350
343 kvm_arm_reset_debug_ptr(vcpu); 351 kvm_arm_reset_debug_ptr(vcpu);
344 352
345 return kvm_vgic_vcpu_init(vcpu); 353 return kvm_vgic_vcpu_init(vcpu);
@@ -727,7 +735,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
727 * Ensure we set mode to IN_GUEST_MODE after we disable 735 * Ensure we set mode to IN_GUEST_MODE after we disable
728 * interrupts and before the final VCPU requests check. 736 * interrupts and before the final VCPU requests check.
729 * See the comment in kvm_vcpu_exiting_guest_mode() and 737 * See the comment in kvm_vcpu_exiting_guest_mode() and
730 * Documentation/virtual/kvm/vcpu-requests.rst 738 * Documentation/virt/kvm/vcpu-requests.rst
731 */ 739 */
732 smp_store_mb(vcpu->mode, IN_GUEST_MODE); 740 smp_store_mb(vcpu->mode, IN_GUEST_MODE);
733 741
diff --git a/virt/kvm/arm/hyp/vgic-v3-sr.c b/virt/kvm/arm/hyp/vgic-v3-sr.c
index 254c5f190a3d..ccf1fde9836c 100644
--- a/virt/kvm/arm/hyp/vgic-v3-sr.c
+++ b/virt/kvm/arm/hyp/vgic-v3-sr.c
@@ -349,8 +349,10 @@ void __hyp_text __vgic_v3_save_aprs(struct kvm_vcpu *vcpu)
349 case 7: 349 case 7:
350 cpu_if->vgic_ap0r[3] = __vgic_v3_read_ap0rn(3); 350 cpu_if->vgic_ap0r[3] = __vgic_v3_read_ap0rn(3);
351 cpu_if->vgic_ap0r[2] = __vgic_v3_read_ap0rn(2); 351 cpu_if->vgic_ap0r[2] = __vgic_v3_read_ap0rn(2);
352 /* Fall through */
352 case 6: 353 case 6:
353 cpu_if->vgic_ap0r[1] = __vgic_v3_read_ap0rn(1); 354 cpu_if->vgic_ap0r[1] = __vgic_v3_read_ap0rn(1);
355 /* Fall through */
354 default: 356 default:
355 cpu_if->vgic_ap0r[0] = __vgic_v3_read_ap0rn(0); 357 cpu_if->vgic_ap0r[0] = __vgic_v3_read_ap0rn(0);
356 } 358 }
@@ -359,8 +361,10 @@ void __hyp_text __vgic_v3_save_aprs(struct kvm_vcpu *vcpu)
359 case 7: 361 case 7:
360 cpu_if->vgic_ap1r[3] = __vgic_v3_read_ap1rn(3); 362 cpu_if->vgic_ap1r[3] = __vgic_v3_read_ap1rn(3);
361 cpu_if->vgic_ap1r[2] = __vgic_v3_read_ap1rn(2); 363 cpu_if->vgic_ap1r[2] = __vgic_v3_read_ap1rn(2);
364 /* Fall through */
362 case 6: 365 case 6:
363 cpu_if->vgic_ap1r[1] = __vgic_v3_read_ap1rn(1); 366 cpu_if->vgic_ap1r[1] = __vgic_v3_read_ap1rn(1);
367 /* Fall through */
364 default: 368 default:
365 cpu_if->vgic_ap1r[0] = __vgic_v3_read_ap1rn(0); 369 cpu_if->vgic_ap1r[0] = __vgic_v3_read_ap1rn(0);
366 } 370 }
@@ -382,8 +386,10 @@ void __hyp_text __vgic_v3_restore_aprs(struct kvm_vcpu *vcpu)
382 case 7: 386 case 7:
383 __vgic_v3_write_ap0rn(cpu_if->vgic_ap0r[3], 3); 387 __vgic_v3_write_ap0rn(cpu_if->vgic_ap0r[3], 3);
384 __vgic_v3_write_ap0rn(cpu_if->vgic_ap0r[2], 2); 388 __vgic_v3_write_ap0rn(cpu_if->vgic_ap0r[2], 2);
389 /* Fall through */
385 case 6: 390 case 6:
386 __vgic_v3_write_ap0rn(cpu_if->vgic_ap0r[1], 1); 391 __vgic_v3_write_ap0rn(cpu_if->vgic_ap0r[1], 1);
392 /* Fall through */
387 default: 393 default:
388 __vgic_v3_write_ap0rn(cpu_if->vgic_ap0r[0], 0); 394 __vgic_v3_write_ap0rn(cpu_if->vgic_ap0r[0], 0);
389 } 395 }
@@ -392,8 +398,10 @@ void __hyp_text __vgic_v3_restore_aprs(struct kvm_vcpu *vcpu)
392 case 7: 398 case 7:
393 __vgic_v3_write_ap1rn(cpu_if->vgic_ap1r[3], 3); 399 __vgic_v3_write_ap1rn(cpu_if->vgic_ap1r[3], 3);
394 __vgic_v3_write_ap1rn(cpu_if->vgic_ap1r[2], 2); 400 __vgic_v3_write_ap1rn(cpu_if->vgic_ap1r[2], 2);
401 /* Fall through */
395 case 6: 402 case 6:
396 __vgic_v3_write_ap1rn(cpu_if->vgic_ap1r[1], 1); 403 __vgic_v3_write_ap1rn(cpu_if->vgic_ap1r[1], 1);
404 /* Fall through */
397 default: 405 default:
398 __vgic_v3_write_ap1rn(cpu_if->vgic_ap1r[0], 0); 406 __vgic_v3_write_ap1rn(cpu_if->vgic_ap1r[0], 0);
399 } 407 }
diff --git a/virt/kvm/arm/mmio.c b/virt/kvm/arm/mmio.c
index a8a6a0c883f1..6af5c91337f2 100644
--- a/virt/kvm/arm/mmio.c
+++ b/virt/kvm/arm/mmio.c
@@ -86,6 +86,12 @@ int kvm_handle_mmio_return(struct kvm_vcpu *vcpu, struct kvm_run *run)
86 unsigned int len; 86 unsigned int len;
87 int mask; 87 int mask;
88 88
89 /* Detect an already handled MMIO return */
90 if (unlikely(!vcpu->mmio_needed))
91 return 0;
92
93 vcpu->mmio_needed = 0;
94
89 if (!run->mmio.is_write) { 95 if (!run->mmio.is_write) {
90 len = run->mmio.len; 96 len = run->mmio.len;
91 if (len > sizeof(unsigned long)) 97 if (len > sizeof(unsigned long))
@@ -188,6 +194,7 @@ int io_mem_abort(struct kvm_vcpu *vcpu, struct kvm_run *run,
188 run->mmio.is_write = is_write; 194 run->mmio.is_write = is_write;
189 run->mmio.phys_addr = fault_ipa; 195 run->mmio.phys_addr = fault_ipa;
190 run->mmio.len = len; 196 run->mmio.len = len;
197 vcpu->mmio_needed = 1;
191 198
192 if (!ret) { 199 if (!ret) {
193 /* We handled the access successfully in the kernel. */ 200 /* We handled the access successfully in the kernel. */
diff --git a/virt/kvm/arm/pmu.c b/virt/kvm/arm/pmu.c
index 3dd8238ed246..362a01886bab 100644
--- a/virt/kvm/arm/pmu.c
+++ b/virt/kvm/arm/pmu.c
@@ -215,6 +215,20 @@ static void kvm_pmu_stop_counter(struct kvm_vcpu *vcpu, struct kvm_pmc *pmc)
215} 215}
216 216
217/** 217/**
218 * kvm_pmu_vcpu_init - assign pmu counter idx for cpu
219 * @vcpu: The vcpu pointer
220 *
221 */
222void kvm_pmu_vcpu_init(struct kvm_vcpu *vcpu)
223{
224 int i;
225 struct kvm_pmu *pmu = &vcpu->arch.pmu;
226
227 for (i = 0; i < ARMV8_PMU_MAX_COUNTERS; i++)
228 pmu->pmc[i].idx = i;
229}
230
231/**
218 * kvm_pmu_vcpu_reset - reset pmu state for cpu 232 * kvm_pmu_vcpu_reset - reset pmu state for cpu
219 * @vcpu: The vcpu pointer 233 * @vcpu: The vcpu pointer
220 * 234 *
@@ -224,10 +238,8 @@ void kvm_pmu_vcpu_reset(struct kvm_vcpu *vcpu)
224 int i; 238 int i;
225 struct kvm_pmu *pmu = &vcpu->arch.pmu; 239 struct kvm_pmu *pmu = &vcpu->arch.pmu;
226 240
227 for (i = 0; i < ARMV8_PMU_MAX_COUNTERS; i++) { 241 for (i = 0; i < ARMV8_PMU_MAX_COUNTERS; i++)
228 kvm_pmu_stop_counter(vcpu, &pmu->pmc[i]); 242 kvm_pmu_stop_counter(vcpu, &pmu->pmc[i]);
229 pmu->pmc[i].idx = i;
230 }
231 243
232 bitmap_zero(vcpu->arch.pmu.chained, ARMV8_PMU_MAX_COUNTER_PAIRS); 244 bitmap_zero(vcpu->arch.pmu.chained, ARMV8_PMU_MAX_COUNTER_PAIRS);
233} 245}
diff --git a/virt/kvm/arm/vgic/vgic-init.c b/virt/kvm/arm/vgic/vgic-init.c
index bdbc297d06fb..e621b5d45b27 100644
--- a/virt/kvm/arm/vgic/vgic-init.c
+++ b/virt/kvm/arm/vgic/vgic-init.c
@@ -8,6 +8,7 @@
8#include <linux/cpu.h> 8#include <linux/cpu.h>
9#include <linux/kvm_host.h> 9#include <linux/kvm_host.h>
10#include <kvm/arm_vgic.h> 10#include <kvm/arm_vgic.h>
11#include <asm/kvm_emulate.h>
11#include <asm/kvm_mmu.h> 12#include <asm/kvm_mmu.h>
12#include "vgic.h" 13#include "vgic.h"
13 14
@@ -164,12 +165,18 @@ static int kvm_vgic_dist_init(struct kvm *kvm, unsigned int nr_spis)
164 irq->vcpu = NULL; 165 irq->vcpu = NULL;
165 irq->target_vcpu = vcpu0; 166 irq->target_vcpu = vcpu0;
166 kref_init(&irq->refcount); 167 kref_init(&irq->refcount);
167 if (dist->vgic_model == KVM_DEV_TYPE_ARM_VGIC_V2) { 168 switch (dist->vgic_model) {
169 case KVM_DEV_TYPE_ARM_VGIC_V2:
168 irq->targets = 0; 170 irq->targets = 0;
169 irq->group = 0; 171 irq->group = 0;
170 } else { 172 break;
173 case KVM_DEV_TYPE_ARM_VGIC_V3:
171 irq->mpidr = 0; 174 irq->mpidr = 0;
172 irq->group = 1; 175 irq->group = 1;
176 break;
177 default:
178 kfree(dist->spis);
179 return -EINVAL;
173 } 180 }
174 } 181 }
175 return 0; 182 return 0;
@@ -209,7 +216,6 @@ int kvm_vgic_vcpu_init(struct kvm_vcpu *vcpu)
209 irq->intid = i; 216 irq->intid = i;
210 irq->vcpu = NULL; 217 irq->vcpu = NULL;
211 irq->target_vcpu = vcpu; 218 irq->target_vcpu = vcpu;
212 irq->targets = 1U << vcpu->vcpu_id;
213 kref_init(&irq->refcount); 219 kref_init(&irq->refcount);
214 if (vgic_irq_is_sgi(i)) { 220 if (vgic_irq_is_sgi(i)) {
215 /* SGIs */ 221 /* SGIs */
@@ -219,11 +225,6 @@ int kvm_vgic_vcpu_init(struct kvm_vcpu *vcpu)
219 /* PPIs */ 225 /* PPIs */
220 irq->config = VGIC_CONFIG_LEVEL; 226 irq->config = VGIC_CONFIG_LEVEL;
221 } 227 }
222
223 if (dist->vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3)
224 irq->group = 1;
225 else
226 irq->group = 0;
227 } 228 }
228 229
229 if (!irqchip_in_kernel(vcpu->kvm)) 230 if (!irqchip_in_kernel(vcpu->kvm))
@@ -286,10 +287,19 @@ int vgic_init(struct kvm *kvm)
286 287
287 for (i = 0; i < VGIC_NR_PRIVATE_IRQS; i++) { 288 for (i = 0; i < VGIC_NR_PRIVATE_IRQS; i++) {
288 struct vgic_irq *irq = &vgic_cpu->private_irqs[i]; 289 struct vgic_irq *irq = &vgic_cpu->private_irqs[i];
289 if (dist->vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3) 290 switch (dist->vgic_model) {
291 case KVM_DEV_TYPE_ARM_VGIC_V3:
290 irq->group = 1; 292 irq->group = 1;
291 else 293 irq->mpidr = kvm_vcpu_get_mpidr_aff(vcpu);
294 break;
295 case KVM_DEV_TYPE_ARM_VGIC_V2:
292 irq->group = 0; 296 irq->group = 0;
297 irq->targets = 1U << idx;
298 break;
299 default:
300 ret = -EINVAL;
301 goto out;
302 }
293 } 303 }
294 } 304 }
295 305
diff --git a/virt/kvm/arm/vgic/vgic-mmio-v3.c b/virt/kvm/arm/vgic/vgic-mmio-v3.c
index 936962abc38d..c45e2d7e942f 100644
--- a/virt/kvm/arm/vgic/vgic-mmio-v3.c
+++ b/virt/kvm/arm/vgic/vgic-mmio-v3.c
@@ -250,7 +250,7 @@ static unsigned long vgic_v3_uaccess_read_pending(struct kvm_vcpu *vcpu,
250 * pending state of interrupt is latched in pending_latch variable. 250 * pending state of interrupt is latched in pending_latch variable.
251 * Userspace will save and restore pending state and line_level 251 * Userspace will save and restore pending state and line_level
252 * separately. 252 * separately.
253 * Refer to Documentation/virtual/kvm/devices/arm-vgic-v3.txt 253 * Refer to Documentation/virt/kvm/devices/arm-vgic-v3.txt
254 * for handling of ISPENDR and ICPENDR. 254 * for handling of ISPENDR and ICPENDR.
255 */ 255 */
256 for (i = 0; i < len * 8; i++) { 256 for (i = 0; i < len * 8; i++) {
diff --git a/virt/kvm/arm/vgic/vgic-mmio.c b/virt/kvm/arm/vgic/vgic-mmio.c
index 3ba7278fb533..0d090482720d 100644
--- a/virt/kvm/arm/vgic/vgic-mmio.c
+++ b/virt/kvm/arm/vgic/vgic-mmio.c
@@ -113,6 +113,22 @@ void vgic_mmio_write_senable(struct kvm_vcpu *vcpu,
113 struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i); 113 struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
114 114
115 raw_spin_lock_irqsave(&irq->irq_lock, flags); 115 raw_spin_lock_irqsave(&irq->irq_lock, flags);
116 if (vgic_irq_is_mapped_level(irq)) {
117 bool was_high = irq->line_level;
118
119 /*
120 * We need to update the state of the interrupt because
121 * the guest might have changed the state of the device
122 * while the interrupt was disabled at the VGIC level.
123 */
124 irq->line_level = vgic_get_phys_line_level(irq);
125 /*
126 * Deactivate the physical interrupt so the GIC will let
127 * us know when it is asserted again.
128 */
129 if (!irq->active && was_high && !irq->line_level)
130 vgic_irq_set_phys_active(irq, false);
131 }
116 irq->enabled = true; 132 irq->enabled = true;
117 vgic_queue_irq_unlock(vcpu->kvm, irq, flags); 133 vgic_queue_irq_unlock(vcpu->kvm, irq, flags);
118 134
@@ -195,6 +211,12 @@ static void vgic_hw_irq_spending(struct kvm_vcpu *vcpu, struct vgic_irq *irq,
195 vgic_irq_set_phys_active(irq, true); 211 vgic_irq_set_phys_active(irq, true);
196} 212}
197 213
214static bool is_vgic_v2_sgi(struct kvm_vcpu *vcpu, struct vgic_irq *irq)
215{
216 return (vgic_irq_is_sgi(irq->intid) &&
217 vcpu->kvm->arch.vgic.vgic_model == KVM_DEV_TYPE_ARM_VGIC_V2);
218}
219
198void vgic_mmio_write_spending(struct kvm_vcpu *vcpu, 220void vgic_mmio_write_spending(struct kvm_vcpu *vcpu,
199 gpa_t addr, unsigned int len, 221 gpa_t addr, unsigned int len,
200 unsigned long val) 222 unsigned long val)
@@ -207,6 +229,12 @@ void vgic_mmio_write_spending(struct kvm_vcpu *vcpu,
207 for_each_set_bit(i, &val, len * 8) { 229 for_each_set_bit(i, &val, len * 8) {
208 struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i); 230 struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
209 231
232 /* GICD_ISPENDR0 SGI bits are WI */
233 if (is_vgic_v2_sgi(vcpu, irq)) {
234 vgic_put_irq(vcpu->kvm, irq);
235 continue;
236 }
237
210 raw_spin_lock_irqsave(&irq->irq_lock, flags); 238 raw_spin_lock_irqsave(&irq->irq_lock, flags);
211 if (irq->hw) 239 if (irq->hw)
212 vgic_hw_irq_spending(vcpu, irq, is_uaccess); 240 vgic_hw_irq_spending(vcpu, irq, is_uaccess);
@@ -254,6 +282,12 @@ void vgic_mmio_write_cpending(struct kvm_vcpu *vcpu,
254 for_each_set_bit(i, &val, len * 8) { 282 for_each_set_bit(i, &val, len * 8) {
255 struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i); 283 struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
256 284
285 /* GICD_ICPENDR0 SGI bits are WI */
286 if (is_vgic_v2_sgi(vcpu, irq)) {
287 vgic_put_irq(vcpu->kvm, irq);
288 continue;
289 }
290
257 raw_spin_lock_irqsave(&irq->irq_lock, flags); 291 raw_spin_lock_irqsave(&irq->irq_lock, flags);
258 292
259 if (irq->hw) 293 if (irq->hw)
diff --git a/virt/kvm/arm/vgic/vgic-v2.c b/virt/kvm/arm/vgic/vgic-v2.c
index 6dd5ad706c92..b00aa304c260 100644
--- a/virt/kvm/arm/vgic/vgic-v2.c
+++ b/virt/kvm/arm/vgic/vgic-v2.c
@@ -184,7 +184,10 @@ void vgic_v2_populate_lr(struct kvm_vcpu *vcpu, struct vgic_irq *irq, int lr)
184 if (vgic_irq_is_sgi(irq->intid)) { 184 if (vgic_irq_is_sgi(irq->intid)) {
185 u32 src = ffs(irq->source); 185 u32 src = ffs(irq->source);
186 186
187 BUG_ON(!src); 187 if (WARN_RATELIMIT(!src, "No SGI source for INTID %d\n",
188 irq->intid))
189 return;
190
188 val |= (src - 1) << GICH_LR_PHYSID_CPUID_SHIFT; 191 val |= (src - 1) << GICH_LR_PHYSID_CPUID_SHIFT;
189 irq->source &= ~(1 << (src - 1)); 192 irq->source &= ~(1 << (src - 1));
190 if (irq->source) { 193 if (irq->source) {
@@ -484,10 +487,17 @@ void vgic_v2_load(struct kvm_vcpu *vcpu)
484 kvm_vgic_global_state.vctrl_base + GICH_APR); 487 kvm_vgic_global_state.vctrl_base + GICH_APR);
485} 488}
486 489
487void vgic_v2_put(struct kvm_vcpu *vcpu) 490void vgic_v2_vmcr_sync(struct kvm_vcpu *vcpu)
488{ 491{
489 struct vgic_v2_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v2; 492 struct vgic_v2_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v2;
490 493
491 cpu_if->vgic_vmcr = readl_relaxed(kvm_vgic_global_state.vctrl_base + GICH_VMCR); 494 cpu_if->vgic_vmcr = readl_relaxed(kvm_vgic_global_state.vctrl_base + GICH_VMCR);
495}
496
497void vgic_v2_put(struct kvm_vcpu *vcpu)
498{
499 struct vgic_v2_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v2;
500
501 vgic_v2_vmcr_sync(vcpu);
492 cpu_if->vgic_apr = readl_relaxed(kvm_vgic_global_state.vctrl_base + GICH_APR); 502 cpu_if->vgic_apr = readl_relaxed(kvm_vgic_global_state.vctrl_base + GICH_APR);
493} 503}
diff --git a/virt/kvm/arm/vgic/vgic-v3.c b/virt/kvm/arm/vgic/vgic-v3.c
index c2c9ce009f63..a4ad431c92a9 100644
--- a/virt/kvm/arm/vgic/vgic-v3.c
+++ b/virt/kvm/arm/vgic/vgic-v3.c
@@ -167,7 +167,10 @@ void vgic_v3_populate_lr(struct kvm_vcpu *vcpu, struct vgic_irq *irq, int lr)
167 model == KVM_DEV_TYPE_ARM_VGIC_V2) { 167 model == KVM_DEV_TYPE_ARM_VGIC_V2) {
168 u32 src = ffs(irq->source); 168 u32 src = ffs(irq->source);
169 169
170 BUG_ON(!src); 170 if (WARN_RATELIMIT(!src, "No SGI source for INTID %d\n",
171 irq->intid))
172 return;
173
171 val |= (src - 1) << GICH_LR_PHYSID_CPUID_SHIFT; 174 val |= (src - 1) << GICH_LR_PHYSID_CPUID_SHIFT;
172 irq->source &= ~(1 << (src - 1)); 175 irq->source &= ~(1 << (src - 1));
173 if (irq->source) { 176 if (irq->source) {
@@ -662,12 +665,17 @@ void vgic_v3_load(struct kvm_vcpu *vcpu)
662 __vgic_v3_activate_traps(vcpu); 665 __vgic_v3_activate_traps(vcpu);
663} 666}
664 667
665void vgic_v3_put(struct kvm_vcpu *vcpu) 668void vgic_v3_vmcr_sync(struct kvm_vcpu *vcpu)
666{ 669{
667 struct vgic_v3_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v3; 670 struct vgic_v3_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v3;
668 671
669 if (likely(cpu_if->vgic_sre)) 672 if (likely(cpu_if->vgic_sre))
670 cpu_if->vgic_vmcr = kvm_call_hyp_ret(__vgic_v3_read_vmcr); 673 cpu_if->vgic_vmcr = kvm_call_hyp_ret(__vgic_v3_read_vmcr);
674}
675
676void vgic_v3_put(struct kvm_vcpu *vcpu)
677{
678 vgic_v3_vmcr_sync(vcpu);
671 679
672 kvm_call_hyp(__vgic_v3_save_aprs, vcpu); 680 kvm_call_hyp(__vgic_v3_save_aprs, vcpu);
673 681
diff --git a/virt/kvm/arm/vgic/vgic.c b/virt/kvm/arm/vgic/vgic.c
index 04786c8ec77e..e7bde65ba67c 100644
--- a/virt/kvm/arm/vgic/vgic.c
+++ b/virt/kvm/arm/vgic/vgic.c
@@ -254,6 +254,13 @@ static int vgic_irq_cmp(void *priv, struct list_head *a, struct list_head *b)
254 bool penda, pendb; 254 bool penda, pendb;
255 int ret; 255 int ret;
256 256
257 /*
258 * list_sort may call this function with the same element when
259 * the list is fairly long.
260 */
261 if (unlikely(irqa == irqb))
262 return 0;
263
257 raw_spin_lock(&irqa->irq_lock); 264 raw_spin_lock(&irqa->irq_lock);
258 raw_spin_lock_nested(&irqb->irq_lock, SINGLE_DEPTH_NESTING); 265 raw_spin_lock_nested(&irqb->irq_lock, SINGLE_DEPTH_NESTING);
259 266
@@ -919,6 +926,17 @@ void kvm_vgic_put(struct kvm_vcpu *vcpu)
919 vgic_v3_put(vcpu); 926 vgic_v3_put(vcpu);
920} 927}
921 928
929void kvm_vgic_vmcr_sync(struct kvm_vcpu *vcpu)
930{
931 if (unlikely(!irqchip_in_kernel(vcpu->kvm)))
932 return;
933
934 if (kvm_vgic_global_state.type == VGIC_V2)
935 vgic_v2_vmcr_sync(vcpu);
936 else
937 vgic_v3_vmcr_sync(vcpu);
938}
939
922int kvm_vgic_vcpu_pending_irq(struct kvm_vcpu *vcpu) 940int kvm_vgic_vcpu_pending_irq(struct kvm_vcpu *vcpu)
923{ 941{
924 struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu; 942 struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
diff --git a/virt/kvm/arm/vgic/vgic.h b/virt/kvm/arm/vgic/vgic.h
index 57205beaa981..797e05004d80 100644
--- a/virt/kvm/arm/vgic/vgic.h
+++ b/virt/kvm/arm/vgic/vgic.h
@@ -42,7 +42,7 @@
42 VGIC_AFFINITY_LEVEL(val, 3)) 42 VGIC_AFFINITY_LEVEL(val, 3))
43 43
44/* 44/*
45 * As per Documentation/virtual/kvm/devices/arm-vgic-v3.txt, 45 * As per Documentation/virt/kvm/devices/arm-vgic-v3.txt,
46 * below macros are defined for CPUREG encoding. 46 * below macros are defined for CPUREG encoding.
47 */ 47 */
48#define KVM_REG_ARM_VGIC_SYSREG_OP0_MASK 0x000000000000c000 48#define KVM_REG_ARM_VGIC_SYSREG_OP0_MASK 0x000000000000c000
@@ -63,7 +63,7 @@
63 KVM_REG_ARM_VGIC_SYSREG_OP2_MASK) 63 KVM_REG_ARM_VGIC_SYSREG_OP2_MASK)
64 64
65/* 65/*
66 * As per Documentation/virtual/kvm/devices/arm-vgic-its.txt, 66 * As per Documentation/virt/kvm/devices/arm-vgic-its.txt,
67 * below macros are defined for ITS table entry encoding. 67 * below macros are defined for ITS table entry encoding.
68 */ 68 */
69#define KVM_ITS_CTE_VALID_SHIFT 63 69#define KVM_ITS_CTE_VALID_SHIFT 63
@@ -193,6 +193,7 @@ int vgic_register_dist_iodev(struct kvm *kvm, gpa_t dist_base_address,
193void vgic_v2_init_lrs(void); 193void vgic_v2_init_lrs(void);
194void vgic_v2_load(struct kvm_vcpu *vcpu); 194void vgic_v2_load(struct kvm_vcpu *vcpu);
195void vgic_v2_put(struct kvm_vcpu *vcpu); 195void vgic_v2_put(struct kvm_vcpu *vcpu);
196void vgic_v2_vmcr_sync(struct kvm_vcpu *vcpu);
196 197
197void vgic_v2_save_state(struct kvm_vcpu *vcpu); 198void vgic_v2_save_state(struct kvm_vcpu *vcpu);
198void vgic_v2_restore_state(struct kvm_vcpu *vcpu); 199void vgic_v2_restore_state(struct kvm_vcpu *vcpu);
@@ -223,6 +224,7 @@ bool vgic_v3_check_base(struct kvm *kvm);
223 224
224void vgic_v3_load(struct kvm_vcpu *vcpu); 225void vgic_v3_load(struct kvm_vcpu *vcpu);
225void vgic_v3_put(struct kvm_vcpu *vcpu); 226void vgic_v3_put(struct kvm_vcpu *vcpu);
227void vgic_v3_vmcr_sync(struct kvm_vcpu *vcpu);
226 228
227bool vgic_has_its(struct kvm *kvm); 229bool vgic_has_its(struct kvm *kvm);
228int kvm_vgic_register_its_device(void); 230int kvm_vgic_register_its_device(void);
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 887f3b0c2b60..c6a91b044d8d 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -1855,8 +1855,7 @@ void kvm_set_pfn_dirty(kvm_pfn_t pfn)
1855 if (!kvm_is_reserved_pfn(pfn)) { 1855 if (!kvm_is_reserved_pfn(pfn)) {
1856 struct page *page = pfn_to_page(pfn); 1856 struct page *page = pfn_to_page(pfn);
1857 1857
1858 if (!PageReserved(page)) 1858 SetPageDirty(page);
1859 SetPageDirty(page);
1860 } 1859 }
1861} 1860}
1862EXPORT_SYMBOL_GPL(kvm_set_pfn_dirty); 1861EXPORT_SYMBOL_GPL(kvm_set_pfn_dirty);
@@ -2477,6 +2476,29 @@ static bool kvm_vcpu_eligible_for_directed_yield(struct kvm_vcpu *vcpu)
2477#endif 2476#endif
2478} 2477}
2479 2478
2479/*
2480 * Unlike kvm_arch_vcpu_runnable, this function is called outside
2481 * a vcpu_load/vcpu_put pair. However, for most architectures
2482 * kvm_arch_vcpu_runnable does not require vcpu_load.
2483 */
2484bool __weak kvm_arch_dy_runnable(struct kvm_vcpu *vcpu)
2485{
2486 return kvm_arch_vcpu_runnable(vcpu);
2487}
2488
2489static bool vcpu_dy_runnable(struct kvm_vcpu *vcpu)
2490{
2491 if (kvm_arch_dy_runnable(vcpu))
2492 return true;
2493
2494#ifdef CONFIG_KVM_ASYNC_PF
2495 if (!list_empty_careful(&vcpu->async_pf.done))
2496 return true;
2497#endif
2498
2499 return false;
2500}
2501
2480void kvm_vcpu_on_spin(struct kvm_vcpu *me, bool yield_to_kernel_mode) 2502void kvm_vcpu_on_spin(struct kvm_vcpu *me, bool yield_to_kernel_mode)
2481{ 2503{
2482 struct kvm *kvm = me->kvm; 2504 struct kvm *kvm = me->kvm;
@@ -2506,9 +2528,10 @@ void kvm_vcpu_on_spin(struct kvm_vcpu *me, bool yield_to_kernel_mode)
2506 continue; 2528 continue;
2507 if (vcpu == me) 2529 if (vcpu == me)
2508 continue; 2530 continue;
2509 if (swait_active(&vcpu->wq) && !kvm_arch_vcpu_runnable(vcpu)) 2531 if (swait_active(&vcpu->wq) && !vcpu_dy_runnable(vcpu))
2510 continue; 2532 continue;
2511 if (yield_to_kernel_mode && !kvm_arch_vcpu_in_kernel(vcpu)) 2533 if (READ_ONCE(vcpu->preempted) && yield_to_kernel_mode &&
2534 !kvm_arch_vcpu_in_kernel(vcpu))
2512 continue; 2535 continue;
2513 if (!kvm_vcpu_eligible_for_directed_yield(vcpu)) 2536 if (!kvm_vcpu_eligible_for_directed_yield(vcpu))
2514 continue; 2537 continue;
@@ -2591,30 +2614,20 @@ static int create_vcpu_fd(struct kvm_vcpu *vcpu)
2591 return anon_inode_getfd(name, &kvm_vcpu_fops, vcpu, O_RDWR | O_CLOEXEC); 2614 return anon_inode_getfd(name, &kvm_vcpu_fops, vcpu, O_RDWR | O_CLOEXEC);
2592} 2615}
2593 2616
2594static int kvm_create_vcpu_debugfs(struct kvm_vcpu *vcpu) 2617static void kvm_create_vcpu_debugfs(struct kvm_vcpu *vcpu)
2595{ 2618{
2619#ifdef __KVM_HAVE_ARCH_VCPU_DEBUGFS
2596 char dir_name[ITOA_MAX_LEN * 2]; 2620 char dir_name[ITOA_MAX_LEN * 2];
2597 int ret;
2598
2599 if (!kvm_arch_has_vcpu_debugfs())
2600 return 0;
2601 2621
2602 if (!debugfs_initialized()) 2622 if (!debugfs_initialized())
2603 return 0; 2623 return;
2604 2624
2605 snprintf(dir_name, sizeof(dir_name), "vcpu%d", vcpu->vcpu_id); 2625 snprintf(dir_name, sizeof(dir_name), "vcpu%d", vcpu->vcpu_id);
2606 vcpu->debugfs_dentry = debugfs_create_dir(dir_name, 2626 vcpu->debugfs_dentry = debugfs_create_dir(dir_name,
2607 vcpu->kvm->debugfs_dentry); 2627 vcpu->kvm->debugfs_dentry);
2608 if (!vcpu->debugfs_dentry)
2609 return -ENOMEM;
2610
2611 ret = kvm_arch_create_vcpu_debugfs(vcpu);
2612 if (ret < 0) {
2613 debugfs_remove_recursive(vcpu->debugfs_dentry);
2614 return ret;
2615 }
2616 2628
2617 return 0; 2629 kvm_arch_create_vcpu_debugfs(vcpu);
2630#endif
2618} 2631}
2619 2632
2620/* 2633/*
@@ -2649,9 +2662,7 @@ static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, u32 id)
2649 if (r) 2662 if (r)
2650 goto vcpu_destroy; 2663 goto vcpu_destroy;
2651 2664
2652 r = kvm_create_vcpu_debugfs(vcpu); 2665 kvm_create_vcpu_debugfs(vcpu);
2653 if (r)
2654 goto vcpu_destroy;
2655 2666
2656 mutex_lock(&kvm->lock); 2667 mutex_lock(&kvm->lock);
2657 if (kvm_get_vcpu_by_id(kvm, id)) { 2668 if (kvm_get_vcpu_by_id(kvm, id)) {
@@ -4205,7 +4216,7 @@ static void kvm_sched_in(struct preempt_notifier *pn, int cpu)
4205{ 4216{
4206 struct kvm_vcpu *vcpu = preempt_notifier_to_vcpu(pn); 4217 struct kvm_vcpu *vcpu = preempt_notifier_to_vcpu(pn);
4207 4218
4208 vcpu->preempted = false; 4219 WRITE_ONCE(vcpu->preempted, false);
4209 WRITE_ONCE(vcpu->ready, false); 4220 WRITE_ONCE(vcpu->ready, false);
4210 4221
4211 kvm_arch_sched_in(vcpu, cpu); 4222 kvm_arch_sched_in(vcpu, cpu);
@@ -4219,7 +4230,7 @@ static void kvm_sched_out(struct preempt_notifier *pn,
4219 struct kvm_vcpu *vcpu = preempt_notifier_to_vcpu(pn); 4230 struct kvm_vcpu *vcpu = preempt_notifier_to_vcpu(pn);
4220 4231
4221 if (current->state == TASK_RUNNING) { 4232 if (current->state == TASK_RUNNING) {
4222 vcpu->preempted = true; 4233 WRITE_ONCE(vcpu->preempted, true);
4223 WRITE_ONCE(vcpu->ready, true); 4234 WRITE_ONCE(vcpu->ready, true);
4224 } 4235 }
4225 kvm_arch_vcpu_put(vcpu); 4236 kvm_arch_vcpu_put(vcpu);